path (string, 13-17 chars) | screenshot_names (sequence, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 class: "code") |
---|---|---|---|
334762/cell_23 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])  # builtin str: np.str no longer exists in NumPy
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])  # test file carries no outcome column
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna(0).astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna(0).astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
hstry['prof_label'] = (hstry['profit'] < -5).astype(int) * 4 + hstry['profit'].between(-5, 0).astype(int) * 3 + hstry['profit'].between(1, 5).astype(int) * 2 + (hstry['profit'] > 5).astype(int) * 1
people2 = pd.merge(people, hstry, on='people_id', how='inner')
people2['positive_counts'] = people2['positive_counts'].fillna(0).astype(np.int64)
people2['negative_counts'] = people2['negative_counts'].fillna(0).astype(np.int64)
people2['profit'] = people2['profit'].fillna(0).astype(np.int64)
xfeats = list(people2.columns)
xfeats.remove('people_id')
xfeats.remove('profit')
xfeats.remove('prof_label')
xfeats.remove('positive_counts')
xfeats.remove('negative_counts')
print(xfeats)
X, Y = (people2[xfeats], people2['prof_label']) | code |
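The arithmetic label construction above can be sanity-checked with pd.cut; a minimal sketch (bin edges chosen to reproduce the same four labels for the integer-valued profits):

bins = [-np.inf, -5.5, 0.5, 5.5, np.inf]  # <=-6 -> 4, -5..0 -> 3, 1..5 -> 2, >=6 -> 1
check = pd.cut(hstry['profit'], bins=bins, labels=[4, 3, 2, 1]).astype(int)
assert (check == hstry['prof_label']).all()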
334762/cell_20 | [
"text_html_output_1.png"
] | code |
|
334762/cell_6 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
goods = act_train[act_train['outcome'] == 1]
bads = act_train[act_train['outcome'] == 0]
goods['date'].groupby(goods.date.dt.date).count().plot(figsize=(10, 5), label='Good')
bads['date'].groupby(bads.date.dt.date).count().plot(figsize=(10, 5), c='r', label='Bad')
plt.legend()
plt.show() | code |
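The daily counts plotted in this cell can equivalently be computed with resample once the timestamp is the index; a short sketch using act_train as loaded above:

daily_train = act_train.set_index('date')['activity_id'].resample('D').count()
daily_train.plot(figsize=(10, 5), label='Train')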
334762/cell_29 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna(0).astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna(0).astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
hstry['prof_label'] = (hstry['profit'] < -5).astype(int) * 4 + hstry['profit'].between(-5, 0).astype(int) * 3 + hstry['profit'].between(1, 5).astype(int) * 2 + (hstry['profit'] > 5).astype(int) * 1
people2 = pd.merge(people, hstry, on='people_id', how='inner')
people2['positive_counts'] = people2['positive_counts'].fillna(0).astype(np.int64)
people2['negative_counts'] = people2['negative_counts'].fillna(0).astype(np.int64)
people2['profit'] = people2['profit'].fillna(0).astype(np.int64)
xfeats = list(people2.columns)
xfeats.remove('people_id')
xfeats.remove('profit')
xfeats.remove('prof_label')
xfeats.remove('positive_counts')
xfeats.remove('negative_counts')
X, Y = (people2[xfeats], people2['prof_label'])
people2[['prof_label', 'pred']].sample(20)  # 'pred' is added in cell_28 via clf.predict | code |
334762/cell_26 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, AdaBoostRegressor
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna(0).astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna(0).astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
hstry['prof_label'] = (hstry['profit'] < -5).astype(int) * 4 + hstry['profit'].between(-5, 0).astype(int) * 3 + hstry['profit'].between(1, 5).astype(int) * 2 + (hstry['profit'] > 5).astype(int) * 1
people2 = pd.merge(people, hstry, on='people_id', how='inner')
people2['positive_counts'] = people2['positive_counts'].fillna(0).astype(np.int64)
people2['negative_counts'] = people2['negative_counts'].fillna(0).astype(np.int64)
people2['profit'] = people2['profit'].fillna(0).astype(np.int64)
xfeats = list(people2.columns)
xfeats.remove('people_id')
xfeats.remove('profit')
xfeats.remove('prof_label')
xfeats.remove('positive_counts')
xfeats.remove('negative_counts')
X, Y = (people2[xfeats], people2['prof_label'])
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
clf = RandomForestRegressor(n_estimators=50)
clf.fit(X_train, y_train)
sortedfeats = sorted(zip(xfeats, clf.feature_importances_), key=lambda x: x[1])
newfeats = []
for i in range(1, 6):
newfeats.append(sortedfeats[len(sortedfeats) - i])
newfeats = [x[0] for x in newfeats]
X, Y = (people2[newfeats], people2['prof_label'])
X_train2, X_test2, y_train2, y_test2 = train_test_split(X, Y, test_size=0.2, random_state=42)
clf2 = RandomForestRegressor(n_estimators=100)
clf2.fit(X_train2, y_train2)
print(clf2.feature_importances_) | code |
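The top-five features picked by the reverse loop above can be obtained more directly with argsort over the fitted importances; a sketch assuming clf and xfeats from this cell:

top5 = np.argsort(clf.feature_importances_)[::-1][:5]  # indices of the five largest importances
newfeats = [xfeats[i] for i in top5]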
334762/cell_28 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_4.png",
"text_plain_output_3.png",
"application_vnd.jupyter.stderr_output_1.png"
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, AdaBoostRegressor
from sklearn.metrics import auc, mean_squared_error
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna(0).astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna(0).astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
hstry['prof_label'] = (hstry['profit'] < -5).astype(int) * 4 + hstry['profit'].between(-5, 0).astype(int) * 3 + hstry['profit'].between(1, 5).astype(int) * 2 + (hstry['profit'] > 5).astype(int) * 1
people2 = pd.merge(people, hstry, on='people_id', how='inner')
people2['positive_counts'] = people2['positive_counts'].fillna(0).astype(np.int64)
people2['negative_counts'] = people2['negative_counts'].fillna(0).astype(np.int64)
people2['profit'] = people2['profit'].fillna(0).astype(np.int64)
xfeats = list(people2.columns)
xfeats.remove('people_id')
xfeats.remove('profit')
xfeats.remove('prof_label')
xfeats.remove('positive_counts')
xfeats.remove('negative_counts')
X, Y = (people2[xfeats], people2['prof_label'])
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
clf = RandomForestRegressor(n_estimators=50)
clf.fit(X_train, y_train)
sortedfeats = sorted(zip(xfeats, clf.feature_importances_), key=lambda x: x[1])
newfeats = []
for i in range(1, 6):
newfeats.append(sortedfeats[len(sortedfeats) - i])
newfeats = [x[0] for x in newfeats]
X, Y = (people2[newfeats], people2['prof_label'])
X_train2, X_test2, y_train2, y_test2 = train_test_split(X, Y, test_size=0.2, random_state=42)
clf2 = RandomForestRegressor(n_estimators=100)
clf2.fit(X_train2, y_train2)
people2['pred'] = clf.predict(people2[xfeats])
people2['pred2'] = clf2.predict(people2[newfeats]) | code |
334762/cell_15 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
goods = act_train[act_train['outcome'] == 1]
bads = act_train[act_train['outcome'] == 0]
goods['date'].groupby(goods.date.dt.date).count().plot(figsize=(10, 5), label='Good')
bads['date'].groupby(bads.date.dt.date).count().plot(figsize=(10, 5), c='r', label='Bad')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna(0).astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna(0).astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
hstry['prof_label'] = (hstry['profit'] < -5).astype(int) * 4 + hstry['profit'].between(-5, 0).astype(int) * 3 + hstry['profit'].between(1, 5).astype(int) * 2 + (hstry['profit'] > 5).astype(int) * 1  # needed by the histogram below; absent from the original cell
plt.figure()
plt.hist(hstry['prof_label'], 4, range=(1, 5))
plt.show() | code |
334762/cell_24 | [
"image_output_11.png",
"image_output_24.png",
"image_output_25.png",
"text_plain_output_5.png",
"text_plain_output_15.png",
"image_output_17.png",
"text_plain_output_9.png",
"image_output_14.png",
"image_output_28.png",
"text_plain_output_20.png",
"image_output_23.png",
"text_plain_output_4.png",
"text_plain_output_13.png",
"image_output_13.png",
"image_output_5.png",
"text_plain_output_14.png",
"image_output_18.png",
"image_output_21.png",
"text_plain_output_27.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"image_output_7.png",
"text_plain_output_24.png",
"text_plain_output_21.png",
"text_plain_output_25.png",
"image_output_20.png",
"text_plain_output_18.png",
"text_plain_output_3.png",
"image_output_4.png",
"text_plain_output_22.png",
"text_plain_output_7.png",
"image_output_8.png",
"text_plain_output_16.png",
"image_output_16.png",
"text_plain_output_8.png",
"text_plain_output_26.png",
"image_output_27.png",
"image_output_6.png",
"text_plain_output_23.png",
"image_output_12.png",
"text_plain_output_28.png",
"image_output_22.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"text_plain_output_19.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"text_plain_output_17.png",
"text_plain_output_11.png",
"text_plain_output_12.png",
"image_output_15.png",
"image_output_9.png",
"image_output_19.png",
"image_output_26.png"
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, AdaBoostRegressor
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)  # X, Y: feature matrix and prof_label target built in cell_23
clf = RandomForestRegressor(n_estimators=50)
clf.fit(X_train, y_train)
print(clf.feature_importances_) | code |
334762/cell_22 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, AdaBoostRegressor
from sklearn.metrics import auc, mean_squared_error
from sklearn.model_selection import train_test_split, cross_val_score | code |
334762/cell_10 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna(0).astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna(0).astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10) | code |
334762/cell_27 | [
"text_plain_output_5.png",
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_4.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, AdaBoostRegressor
from sklearn.metrics import auc, mean_squared_error
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna(0).astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna(0).astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
hstry['prof_label'] = (hstry['profit'] < -5).astype(int) * 4 + hstry['profit'].between(-5, 0).astype(int) * 3 + hstry['profit'].between(1, 5).astype(int) * 2 + (hstry['profit'] > 5).astype(int) * 1
people2 = pd.merge(people, hstry, on='people_id', how='inner')
people2['positive_counts'] = people2['positive_counts'].fillna(0).astype(np.int64)
people2['negative_counts'] = people2['negative_counts'].fillna(0).astype(np.int64)
people2['profit'] = people2['profit'].fillna(0).astype(np.int64)
xfeats = list(people2.columns)
xfeats.remove('people_id')
xfeats.remove('profit')
xfeats.remove('prof_label')
xfeats.remove('positive_counts')
xfeats.remove('negative_counts')
X, Y = (people2[xfeats], people2['prof_label'])
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
clf = RandomForestRegressor(n_estimators=50)
clf.fit(X_train, y_train)
sortedfeats = sorted(zip(xfeats, clf.feature_importances_), key=lambda x: x[1])
newfeats = []
for i in range(1, 6):
newfeats.append(sortedfeats[len(sortedfeats) - i])
newfeats = [x[0] for x in newfeats]
X, Y = (people2[newfeats], people2['prof_label'])
X_train2, X_test2, y_train2, y_test2 = train_test_split(X, Y, test_size=0.2, random_state=42)
clf2 = RandomForestRegressor(n_estimators=100)
clf2.fit(X_train2, y_train2)
print(clf.score(X_test, y_test), clf2.score(X_test2, y_test2))
print(mean_squared_error(clf.predict(X_test), y_test), mean_squared_error(clf2.predict(X_test2), y_test2)) | code |
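Because prof_label is a small ordinal label set, a classifier scored by cross-validated accuracy may read more naturally than the regressor's R^2; a hedged sketch reusing the imports and the X, Y built in this cell:

clf_c = RandomForestClassifier(n_estimators=50, random_state=42)
print(cross_val_score(clf_c, X, Y, cv=3).mean())  # mean accuracy over 3 folds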
334762/cell_12 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna(0).astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna(0).astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
hstry['profit'].describe() | code |
333041/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
DATA_PATH = '../input/'  # assumed: directory holding the TalkingData CSVs
events = pd.read_csv('{0}events.csv'.format(DATA_PATH)).loc[:, ['timestamp', 'device_id']]
events['timestamp'] = pd.to_datetime(events['timestamp'])
events['hour'] = events['timestamp'].dt.hour
events['hour_recentred'] = (events['hour'] + 2) % 24 - 2  # assumed recentring: maps 22-23h to -2..-1, matching the (-2, 22) axis
ax = sns.distplot(events['hour'])
ax.set_title('Events by hour')
ax.set_xlim(xmin=0, xmax=24)
ax.set_xlabel('Hour of day')
plt.show()
ax1 = sns.distplot(events['hour_recentred'])
ax1.set_xlim(xmin=-2, xmax=22)
ax1.set_xlabel('Hour of day')
ax1.set_title('Events by hour -- recentered')
age_sex = pd.read_csv('{0}gender_age_train.csv'.format(DATA_PATH)).drop('group', axis=1)
age_sex_event = age_sex.merge(events, 'inner', on='device_id').drop_duplicates().drop('device_id', axis=1)
age_sex_event['bin'] = pd.cut(age_sex_event['hour_recentred'], [-2, 2, 7, 22])
ax = sns.violinplot(x="bin", y="age", data = age_sex_event)
ax.set_ylim(ymin = 18, ymax = 55)
ax.set_xlabel('Time of day')
ax.set_title('Age distribution by time of day')
ax_violin = sns.violinplot(x='bin', y='age', hue='gender', split=False, data=age_sex_event)
ax_violin.set_ylim(ymin=18, ymax=55)
ax_violin.set_xlabel('Time of day')
ax_violin.set_title('Age distribution by time of day and gender')
ax_violin.legend(bbox_to_anchor=(1.05, 1), loc=2) | code |
333041/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
DATA_PATH = '../input/'  # assumed: directory holding the TalkingData CSVs
events = pd.read_csv('{0}events.csv'.format(DATA_PATH)).loc[:, ['timestamp', 'device_id']]
events['timestamp'] = pd.to_datetime(events['timestamp'])
events['hour'] = events['timestamp'].dt.hour
ax = sns.distplot(events['hour'])
ax.set_title('Events by hour')
ax.set_xlim(xmin=0, xmax=24)
ax.set_xlabel('Hour of day')
plt.show() | code |
333041/cell_17 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
DATA_PATH = '../input/'  # assumed: directory holding the TalkingData CSVs
events = pd.read_csv('{0}events.csv'.format(DATA_PATH)).loc[:, ['timestamp', 'device_id']]
events['timestamp'] = pd.to_datetime(events['timestamp'])
events['hour'] = events['timestamp'].dt.hour
events['hour_recentred'] = (events['hour'] + 2) % 24 - 2  # assumed recentring: maps 22-23h to -2..-1
ax = sns.distplot(events['hour'])
ax.set_title('Events by hour')
ax.set_xlim(xmin=0, xmax=24)
ax.set_xlabel('Hour of day')
plt.show()
ax1 = sns.distplot(events['hour_recentred'])
ax1.set_xlim(xmin=-2, xmax=22)
ax1.set_xlabel('Hour of day')
ax1.set_title('Events by hour -- recentered')
age_sex = pd.read_csv('{0}gender_age_train.csv'.format(DATA_PATH)).drop('group', axis=1)
age_sex_event = age_sex.merge(events, 'inner', on='device_id').drop_duplicates().drop('device_id', axis=1)
age_sex_event['bin'] = pd.cut(age_sex_event['hour_recentred'], [-2, 2, 7, 22])
ax = sns.violinplot(x='bin', y='age', data=age_sex_event)
ax.set_ylim(ymin=18, ymax=55)
ax.set_xlabel('Time of day')
ax.set_title('Age distribution by time of day') | code |
333041/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
DATA_PATH = '../input/'  # assumed: directory holding the TalkingData CSVs
events = pd.read_csv('{0}events.csv'.format(DATA_PATH)).loc[:, ['timestamp', 'device_id']]
events['timestamp'] = pd.to_datetime(events['timestamp'])
events['hour'] = events['timestamp'].dt.hour
events['hour_recentred'] = (events['hour'] + 2) % 24 - 2  # assumed recentring: maps 22-23h to -2..-1
ax = sns.distplot(events['hour'])
ax.set_title('Events by hour')
ax.set_xlim(xmin=0, xmax=24)
ax.set_xlabel('Hour of day')
plt.show()
ax1 = sns.distplot(events['hour_recentred'])
ax1.set_xlim(xmin=-2, xmax=22)
ax1.set_xlabel('Hour of day')
ax1.set_title('Events by hour -- recentered') | code |
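sns.distplot is deprecated in current seaborn (a likely source of the stderr outputs recorded for these cells); an equivalent sketch with the newer API:

ax1 = sns.histplot(events['hour_recentred'], kde=True, stat='density')
ax1.set_xlim(-2, 22)
ax1.set_xlabel('Hour of day')
ax1.set_title('Events by hour -- recentered')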
72081461/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
train_car.dropna(inplace=True)
train_car.isnull().sum()
train_car.dropna(inplace=True)
buying_price = pd.crosstab(train_car['buying_price'], train_car['acceptability'])
maintenance_price = pd.crosstab(train_car['maintenance_price'], train_car['acceptability'])
number_of_doors = pd.crosstab(train_car['number_of_doors'], train_car['acceptability'])
carry_capacity = pd.crosstab(train_car['carry_capacity'], train_car['acceptability'])
trunk_size = pd.crosstab(train_car['trunk_size'], train_car['acceptability'])
safety = pd.crosstab(train_car['safety'], train_car['acceptability'])
f, ax = plt.subplots(figsize=(9, 9))
stacked = buying_price.stack().reset_index().rename(columns={0: 'value'})
sns.barplot(x=stacked['buying_price'], y=stacked['value'], hue=stacked['acceptability']) | code |
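The six near-identical crosstab/barplot cells in this notebook can be collapsed into one loop; a sketch over the frames built above:

tables = {'buying_price': buying_price, 'maintenance_price': maintenance_price,
          'number_of_doors': number_of_doors, 'carry_capacity': carry_capacity,
          'trunk_size': trunk_size, 'safety': safety}
for name, tab in tables.items():
    stacked = tab.stack().reset_index().rename(columns={0: 'value'})
    f, ax = plt.subplots(figsize=(9, 9))
    sns.barplot(x=stacked[name], y=stacked['value'], hue=stacked['acceptability'], ax=ax)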
72081461/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
dev_car.isnull().sum() | code |
72081461/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
dev_car.head() | code |
72081461/cell_4 | [
"text_html_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
os.path.isfile('../input/week1-car-acceptability/car_acc_train.csv') | code |
72081461/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
train_car.dropna(inplace=True)
train_car.isnull().sum()
train_car.dropna(inplace=True)
buying_price = pd.crosstab(train_car['buying_price'], train_car['acceptability'])
maintenance_price = pd.crosstab(train_car['maintenance_price'], train_car['acceptability'])
number_of_doors = pd.crosstab(train_car['number_of_doors'], train_car['acceptability'])
carry_capacity = pd.crosstab(train_car['carry_capacity'], train_car['acceptability'])
trunk_size = pd.crosstab(train_car['trunk_size'], train_car['acceptability'])
safety = pd.crosstab(train_car['safety'], train_car['acceptability'])
maintenance_price | code |
72081461/cell_30 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
train_car.dropna(inplace=True)
train_car.isnull().sum()
train_car.dropna(inplace=True)
buying_price = pd.crosstab(train_car['buying_price'], train_car['acceptability'])
maintenance_price = pd.crosstab(train_car['maintenance_price'], train_car['acceptability'])
number_of_doors = pd.crosstab(train_car['number_of_doors'], train_car['acceptability'])
carry_capacity = pd.crosstab(train_car['carry_capacity'], train_car['acceptability'])
trunk_size = pd.crosstab(train_car['trunk_size'], train_car['acceptability'])
safety = pd.crosstab(train_car['safety'], train_car['acceptability'])
f, ax = plt.subplots(figsize=(9, 9))
stacked = buying_price.stack().reset_index().rename(columns={0: 'value'})
sns.barplot(x=stacked['buying_price'], y=stacked['value'], hue=stacked['acceptability'])
f, ax = plt.subplots(figsize=(9, 9))
stacked = maintenance_price.stack().reset_index().rename(columns={0: 'value'})
sns.barplot(x=stacked['maintenance_price'], y=stacked['value'], hue=stacked['acceptability'])
f, ax = plt.subplots(figsize=(9, 9))
stacked = number_of_doors.stack().reset_index().rename(columns={0: 'value'})
sns.barplot(x=stacked['number_of_doors'], y=stacked['value'], hue=stacked['acceptability'])
f, ax = plt.subplots(figsize=(9, 9))
stacked = carry_capacity.stack().reset_index().rename(columns={0: 'value'})
sns.barplot(x=stacked['carry_capacity'], y=stacked['value'], hue=stacked['acceptability']) | code |
72081461/cell_33 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
train_car.dropna(inplace=True)
train_car.isnull().sum()
train_car.dropna(inplace=True)
buying_price = pd.crosstab(train_car['buying_price'], train_car['acceptability'])
maintenance_price = pd.crosstab(train_car['maintenance_price'], train_car['acceptability'])
number_of_doors = pd.crosstab(train_car['number_of_doors'], train_car['acceptability'])
carry_capacity = pd.crosstab(train_car['carry_capacity'], train_car['acceptability'])
trunk_size = pd.crosstab(train_car['trunk_size'], train_car['acceptability'])
safety = pd.crosstab(train_car['safety'], train_car['acceptability'])
f, ax = plt.subplots(figsize=(9, 9))
stacked = buying_price.stack().reset_index().rename(columns={0: 'value'})
sns.barplot(x=stacked['buying_price'], y=stacked['value'], hue=stacked['acceptability'])
f, ax = plt.subplots(figsize=(9, 9))
stacked = maintenance_price.stack().reset_index().rename(columns={0: 'value'})
sns.barplot(x=stacked['maintenance_price'], y=stacked['value'], hue=stacked['acceptability'])
f, ax = plt.subplots(figsize=(9, 9))
stacked = number_of_doors.stack().reset_index().rename(columns={0: 'value'})
sns.barplot(x=stacked['number_of_doors'], y=stacked['value'], hue=stacked['acceptability'])
f, ax = plt.subplots(figsize=(9, 9))
stacked = carry_capacity.stack().reset_index().rename(columns={0: 'value'})
sns.barplot(x=stacked['carry_capacity'], y=stacked['value'], hue=stacked['acceptability'])
f, ax = plt.subplots(figsize=(9, 9))
stacked = trunk_size.stack().reset_index().rename(columns={0: 'value'})
sns.barplot(x=stacked['trunk_size'], y=stacked['value'], hue=stacked['acceptability']) | code |
72081461/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
train_car.dropna(inplace=True)
train_car.isnull().sum()
train_car.dropna(inplace=True)
buying_price = pd.crosstab(train_car['buying_price'], train_car['acceptability'])
maintenance_price = pd.crosstab(train_car['maintenance_price'], train_car['acceptability'])
number_of_doors = pd.crosstab(train_car['number_of_doors'], train_car['acceptability'])
carry_capacity = pd.crosstab(train_car['carry_capacity'], train_car['acceptability'])
trunk_size = pd.crosstab(train_car['trunk_size'], train_car['acceptability'])
safety = pd.crosstab(train_car['safety'], train_car['acceptability'])
buying_price | code |
72081461/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
len(dev_car) | code |
72081461/cell_29 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
train_car.dropna(inplace=True)
train_car.isnull().sum()
train_car.dropna(inplace=True)
buying_price = pd.crosstab(train_car['buying_price'], train_car['acceptability'])
maintenance_price = pd.crosstab(train_car['maintenance_price'], train_car['acceptability'])
number_of_doors = pd.crosstab(train_car['number_of_doors'], train_car['acceptability'])
carry_capacity = pd.crosstab(train_car['carry_capacity'], train_car['acceptability'])
trunk_size = pd.crosstab(train_car['trunk_size'], train_car['acceptability'])
safety = pd.crosstab(train_car['safety'], train_car['acceptability'])
carry_capacity | code |
72081461/cell_26 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
train_car.dropna(inplace=True)
train_car.isnull().sum()
train_car.dropna(inplace=True)
buying_price = pd.crosstab(train_car['buying_price'], train_car['acceptability'])
maintenance_price = pd.crosstab(train_car['maintenance_price'], train_car['acceptability'])
number_of_doors = pd.crosstab(train_car['number_of_doors'], train_car['acceptability'])
carry_capacity = pd.crosstab(train_car['carry_capacity'], train_car['acceptability'])
trunk_size = pd.crosstab(train_car['trunk_size'], train_car['acceptability'])
safety = pd.crosstab(train_car['safety'], train_car['acceptability'])
number_of_doors | code |
72081461/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
72081461/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
len(test_car[test_car['carry_capacity'] == '3']) | code |
72081461/cell_32 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
train_car.dropna(inplace=True)
train_car.isnull().sum()
train_car.dropna(inplace=True)
buying_price = pd.crosstab(train_car['buying_price'], train_car['acceptability'])
maintenance_price = pd.crosstab(train_car['maintenance_price'], train_car['acceptability'])
number_of_doors = pd.crosstab(train_car['number_of_doors'], train_car['acceptability'])
carry_capacity = pd.crosstab(train_car['carry_capacity'], train_car['acceptability'])
trunk_size = pd.crosstab(train_car['trunk_size'], train_car['acceptability'])
safety = pd.crosstab(train_car['safety'], train_car['acceptability'])
trunk_size | code |
72081461/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
train_car.dropna(inplace=True)
train_car.isnull().sum()
train_car.dropna(inplace=True)
train_car.describe() | code |
72081461/cell_35 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
train_car.dropna(inplace=True)
train_car.isnull().sum()
train_car.dropna(inplace=True)
buying_price = pd.crosstab(train_car['buying_price'], train_car['acceptability'])
maintenance_price = pd.crosstab(train_car['maintenance_price'], train_car['acceptability'])
number_of_doors = pd.crosstab(train_car['number_of_doors'], train_car['acceptability'])
carry_capacity = pd.crosstab(train_car['carry_capacity'], train_car['acceptability'])
trunk_size = pd.crosstab(train_car['trunk_size'], train_car['acceptability'])
safety = pd.crosstab(train_car['safety'], train_car['acceptability'])
safety | code |
72081461/cell_24 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
train_car.dropna(inplace=True)
train_car.isnull().sum()
train_car.dropna(inplace=True)
buying_price = pd.crosstab(train_car['buying_price'], train_car['acceptability'])
maintenance_price = pd.crosstab(train_car['maintenance_price'], train_car['acceptability'])
number_of_doors = pd.crosstab(train_car['number_of_doors'], train_car['acceptability'])
carry_capacity = pd.crosstab(train_car['carry_capacity'], train_car['acceptability'])
trunk_size = pd.crosstab(train_car['trunk_size'], train_car['acceptability'])
safety = pd.crosstab(train_car['safety'], train_car['acceptability'])
f, ax = plt.subplots(figsize=(9, 9))
stacked = buying_price.stack().reset_index().rename(columns={0: 'value'})
sns.barplot(x=stacked['buying_price'], y=stacked['value'], hue=stacked['acceptability'])
f, ax = plt.subplots(figsize=(9, 9))
stacked = maintenance_price.stack().reset_index().rename(columns={0: 'value'})
sns.barplot(x=stacked['maintenance_price'], y=stacked['value'], hue=stacked['acceptability']) | code |
72081461/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
test_car.isnull().sum() | code |
72081461/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
train_car.dropna(inplace=True)
(len(train_car), len(test_car), len(dev_car)) | code |
72081461/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
train_car.dropna(inplace=True)
train_car.isnull().sum()
train_car.dropna(inplace=True)
buying_price = pd.crosstab(train_car['buying_price'], train_car['acceptability'])
maintenance_price = pd.crosstab(train_car['maintenance_price'], train_car['acceptability'])
number_of_doors = pd.crosstab(train_car['number_of_doors'], train_car['acceptability'])
carry_capacity = pd.crosstab(train_car['carry_capacity'], train_car['acceptability'])
trunk_size = pd.crosstab(train_car['trunk_size'], train_car['acceptability'])
safety = pd.crosstab(train_car['safety'], train_car['acceptability'])
f, ax = plt.subplots(figsize=(9, 9))
stacked = buying_price.stack().reset_index().rename(columns={0: 'value'})
sns.barplot(x=stacked['buying_price'], y=stacked['value'], hue=stacked['acceptability'])
f, ax = plt.subplots(figsize=(9, 9))
stacked = maintenance_price.stack().reset_index().rename(columns={0: 'value'})
sns.barplot(x=stacked['maintenance_price'], y=stacked['value'], hue=stacked['acceptability'])
f, ax = plt.subplots(figsize=(9, 9))
stacked = number_of_doors.stack().reset_index().rename(columns={0: 'value'})
sns.barplot(x=stacked['number_of_doors'], y=stacked['value'], hue=stacked['acceptability']) | code |
72081461/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_car = pd.read_csv('../input/week1-car-acceptability/car_acc_train.csv')
train_car.dropna(inplace=True)
test_car = pd.read_csv('../input/it2034ch1502-car-acceptability-prediction/test.csv')
dev_car = pd.read_csv('../input/week1-car-acceptability/car_acc_dev_v2.csv')
train_car.dropna(inplace=True)
train_car.isnull().sum() | code |
130013615/cell_14 | [
"text_plain_output_1.png"
] | from tqdm import tqdm
import time
import time
from tqdm import tqdm
with tqdm(total=200) as pbar:
pbar.set_description('Processing')
for i in range(20):
time.sleep(0.1)
pbar.update(10) | code |
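The same progress bar can wrap the iterable directly, letting tqdm infer the total; a minimal sketch reusing the imports from the cell above:

for i in tqdm(range(20), desc='Processing'):
    time.sleep(0.1)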
130013615/cell_5 | [
"text_plain_output_5.png",
"text_plain_output_4.png",
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | !pip install -U sentence-transformers
!pip install openpyxl | code |
1010388/cell_40 | [
"application_vnd.jupyter.stderr_output_1.png"
import matplotlib.pyplot as plt
from statsmodels.graphics.factorplots import interaction_plot
# categorical_columns, counting_columns, bounded_columns, temporal_columns and
# recent_df are built in earlier cells of this notebook
categorical_columnss = categorical_columns + counting_columns + bounded_columns
for c in categorical_columnss:
if c in temporal_columns:
continue
num = recent_df['SalePrice']
c1 = recent_df[c]
delete = []
for cc in categorical_columnss:
if cc in temporal_columns or cc == c or cc in delete:
continue
c2 = recent_df[cc]
c1_classes = recent_df[c].nunique()  # number of factor levels, not row count
c2_classes = recent_df[cc].nunique()
if c2_classes < c1_classes:
c1, c2 = c2, c1  # trace the factor with fewer levels
plt.style.use('ggplot')
fig, ax = plt.subplots(figsize=(9, 6))
fig = interaction_plot(c2, c1, num, ms=12, ax=ax)
plt.show()
delete.append(cc) | code |
49120206/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
Reg = LinearRegression()
Reg.fit(X_train, y_train)  # X_train, y_train come from a train_test_split in an earlier cell
49120206/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ec = pd.read_csv('../input/ecommerce-customers/Ecommerce Customers.csv')
ec.info() | code |
49120206/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ec = pd.read_csv('../input/ecommerce-customers/Ecommerce Customers.csv')
ec.columns | code |
49120206/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
49120206/cell_18 | [
"text_plain_output_1.png"
] | Pred = y = m * c + b | code |
49120206/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
Reg = LinearRegression()
Reg.fit(X_train, y_train)
Reg.predict([[31, 11, 37, 2]])
Reg.coef_ | code |
49120206/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
Reg = LinearRegression()
Reg.fit(X_train, y_train)
Reg.predict([[31, 11, 37, 2]])
Reg.coef_
Reg.intercept_ | code |
49120206/cell_17 | [
"text_plain_output_1.png"
] | 31 * 24.84191503 + 11 * 38.33120482 + 37 * 0.18325228 + 2 * 61.48057858 + -1007.25872361 | code |
49120206/cell_14 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LinearRegression
Reg = LinearRegression()
Reg.fit(X_train, y_train)
Reg.predict([[31, 11, 37, 2]]) | code |
49120206/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ec = pd.read_csv('../input/ecommerce-customers/Ecommerce Customers.csv')
ec.head() | code |
74064874/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
meta = pd.read_csv('../input/buildingdatagenomeproject2/metadata.csv', index_col='building_id')
meta.loc[buildingname]
meta.loc[buildingname]['sqm']
office_example_normalized = office_example / meta.loc[buildingname]['sqm']
office_example_normalized.head() | code |
74064874/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
meta = pd.read_csv('../input/buildingdatagenomeproject2/metadata.csv', index_col='building_id')
meta.head() | code |
74064874/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
office_example.info() | code |
74064874/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
meta = pd.read_csv('../input/buildingdatagenomeproject2/metadata.csv', index_col='building_id')
meta.loc[buildingname]
meta.loc[buildingname]['sqm']
office_example_normalized = office_example / meta.loc[buildingname]['sqm']
office_example_normalized_monthly = office_example_normalized.resample('M').sum()
office_example_normalized_monthly.plot(kind='bar', figsize=(10, 4), title='Energy Consumption per Square Meter Floor Area') | code |
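The monthly figure above is hourly kWh normalized by floor area and summed per month; the same numbers can be reproduced in two lines (a sketch using office_example and meta from this cell):

monthly_kwh = office_example.resample('M').sum()  # kWh per month
eui = monthly_kwh / meta.loc[buildingname, 'sqm']  # kWh per m^2 per month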
74064874/cell_30 | [
"text_html_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
meta = pd.read_csv('../input/buildingdatagenomeproject2/metadata.csv', index_col='building_id')
meta.loc[buildingname]
meta.loc[buildingname]['sqm']
office_example_normalized = office_example / meta.loc[buildingname]['sqm']
meta[meta.site_id == 'Wolf'].sqm | code |
74064874/cell_33 | [
"text_html_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
meta = pd.read_csv('../input/buildingdatagenomeproject2/metadata.csv', index_col='building_id')
meta.loc[buildingname]
meta.loc[buildingname]['sqm']
office_example_normalized = office_example / meta.loc[buildingname]['sqm']
site_example_elec_meter_data = elec_all_data.loc[:, elec_all_data.columns.str.contains('Wolf')]
meta[meta.site_id == 'Wolf'].sqm
site_example_elec_meter_data_normalized = site_example_elec_meter_data.div(meta[meta.site_id == 'Wolf'].sqm)
site_example_elec_meter_data_normalized.info() | code |
74064874/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
elec_all_data.head() | code |
74064874/cell_29 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
meta = pd.read_csv('../input/buildingdatagenomeproject2/metadata.csv', index_col='building_id')
meta.loc[buildingname]
meta.loc[buildingname]['sqm']
office_example_normalized = office_example / meta.loc[buildingname]['sqm']
meta[meta.site_id == 'Wolf'].head() | code |
74064874/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
site_example_elec_meter_data = elec_all_data.loc[:, elec_all_data.columns.str.contains('Wolf')]
site_example_elec_meter_data.head() | code |
74064874/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
office_example.head() | code |
74064874/cell_32 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
meta = pd.read_csv('../input/buildingdatagenomeproject2/metadata.csv', index_col='building_id')
meta.loc[buildingname]
meta.loc[buildingname]['sqm']
office_example_normalized = office_example / meta.loc[buildingname]['sqm']
site_example_elec_meter_data = elec_all_data.loc[:, elec_all_data.columns.str.contains('Wolf')]
meta[meta.site_id == 'Wolf'].sqm
site_example_elec_meter_data_normalized = site_example_elec_meter_data.div(meta[meta.site_id == 'Wolf'].sqm)
site_example_elec_meter_data_normalized.head() | code |
74064874/cell_28 | [
"text_html_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
meta = pd.read_csv('../input/buildingdatagenomeproject2/metadata.csv', index_col='building_id')
meta.loc[buildingname]
meta.loc[buildingname]['sqm']
office_example_normalized = office_example / meta.loc[buildingname]['sqm']
meta.head() | code |
74064874/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
office_example.head() | code |
74064874/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
meta = pd.read_csv('../input/buildingdatagenomeproject2/metadata.csv', index_col='building_id')
meta.loc[buildingname] | code |
74064874/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
meta = pd.read_csv('../input/buildingdatagenomeproject2/metadata.csv', index_col='building_id')
meta.loc[buildingname]
meta.loc[buildingname]['sqm'] | code |
74064874/cell_35 | [
"text_html_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
meta = pd.read_csv('../input/buildingdatagenomeproject2/metadata.csv', index_col='building_id')
meta.loc[buildingname]
meta.loc[buildingname]['sqm']
office_example_normalized = office_example / meta.loc[buildingname]['sqm']
site_example_elec_meter_data = elec_all_data.loc[:, elec_all_data.columns.str.contains('Wolf')]
meta[meta.site_id == 'Wolf'].sqm
site_example_elec_meter_data_normalized = site_example_elec_meter_data.div(meta[meta.site_id == 'Wolf'].sqm)
site_example_elec_meter_data.sort_index(axis=1).iloc[:, -10:].sum().plot(kind='bar', figsize=(10, 5)) | code |
74064874/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
meta = pd.read_csv('../input/buildingdatagenomeproject2/metadata.csv', index_col='building_id')
meta.info() | code |
74064874/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
office_example.plot(figsize=(15, 6)) | code |
74064874/cell_27 | [
"text_html_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
site_example_elec_meter_data = elec_all_data.loc[:, elec_all_data.columns.str.contains('Wolf')]
site_example_elec_meter_data.info() | code |
74064874/cell_37 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
buildingname = 'Panther_office_Hannah'
office_example = pd.DataFrame(elec_all_data[buildingname].truncate(before='2017-01-01'))
meta = pd.read_csv('../input/buildingdatagenomeproject2/metadata.csv', index_col='building_id')
meta.loc[buildingname]
meta.loc[buildingname]['sqm']
office_example_normalized = office_example / meta.loc[buildingname]['sqm']
site_example_elec_meter_data = elec_all_data.loc[:, elec_all_data.columns.str.contains('Wolf')]
meta[meta.site_id == 'Wolf'].sqm
site_example_elec_meter_data_normalized = site_example_elec_meter_data.div(meta[meta.site_id == 'Wolf'].sqm)
site_example_elec_meter_data_normalized.sort_index(axis=1).iloc[:, -10:].sum().plot(kind='bar', figsize=(10, 5)) | code |
74064874/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
elec_all_data = pd.read_csv('../input/buildingdatagenomeproject2/electricity_cleaned.csv', index_col='timestamp', parse_dates=True)
elec_all_data.info() | code |
18157974/cell_21 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape
df.columns
df.shape
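# Tukey's rule: values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are dropped as outliers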
def remove_outlier(df_in, col_name):
q1 = df_in[col_name].quantile(0.25)
q3 = df_in[col_name].quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(df_in[col_name] > fence_low) & (df_in[col_name] < fence_high)]
return df_out
def remove_outlier_cat(df_in, group):
q1 = group.quantile(0.25)
q3 = group.quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(group > fence_low) & (group < fence_high)]
return df_out
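# repeat the Pick_Time filter; each pass recomputes the fences on the already-trimmed sample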
for i in range(10):
df = remove_outlier(df, 'Pick_Time')
df.isnull().sum()
fig, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(df.corr(), annot=True, linewidths=0.5, ax=ax)
for i in np.unique(df['Actual_Quantity']):
    # evaluate IQR fences for Pick_Time within each Actual_Quantity group
    subset = df[df['Actual_Quantity'] == i]
    remove_outlier_cat(subset, subset['Pick_Time'])
df.columns
sns.countplot(df['total_quantity_of_items_in_container']) | code |
18157974/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape
df.columns
df.shape
def remove_outlier(df_in, col_name):
q1 = df_in[col_name].quantile(0.25)
q3 = df_in[col_name].quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(df_in[col_name] > fence_low) & (df_in[col_name] < fence_high)]
return df_out
def remove_outlier_cat(df_in, group):
q1 = group.quantile(0.25)
q3 = group.quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(group > fence_low) & (group < fence_high)]
return df_out
for i in range(10):
df = remove_outlier(df, 'Pick_Time')
df.isnull().sum() | code |
18157974/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape
df.columns | code |
18157974/cell_30 | [
"text_plain_output_1.png"
] | from catboost import CatBoostRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import warnings
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape
df.columns
df.shape
def remove_outlier(df_in, col_name):
q1 = df_in[col_name].quantile(0.25)
q3 = df_in[col_name].quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(df_in[col_name] > fence_low) & (df_in[col_name] < fence_high)]
return df_out
def remove_outlier_cat(df_in, group):
q1 = group.quantile(0.25)
q3 = group.quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(group > fence_low) & (group < fence_high)]
return df_out
for i in range(10):
df = remove_outlier(df, 'Pick_Time')
df.isnull().sum()
fig, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(df.corr(), annot=True, linewidths=0.5, ax=ax)
for i in np.unique(df['Actual_Quantity']):
    # evaluate IQR fences for Pick_Time within each Actual_Quantity group
    subset = df[df['Actual_Quantity'] == i]
    remove_outlier_cat(subset, subset['Pick_Time'])
df.columns
hr = []
minutes = []
seconds = []
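# parse Start_Time_of_Picking (format assumed to be 'HH:MM.SS') into hour/minute/second lists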
for i in range(len(df['Start_Time_of_Picking'])):
hr.append(int(df.iloc[i]['Start_Time_of_Picking'].split(':')[0]))
a, b = df.iloc[i]['Start_Time_of_Picking'].split(':')[1].split('.')
minutes.append(int(a))
seconds.append(int(b))
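# one-hot encode the categorical columns; drop_first=True avoids redundant dummy levels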
df = pd.concat([df, pd.get_dummies(df['SKU'], drop_first=True, prefix=1)], axis=1, sort=False)
df = pd.concat([df, pd.get_dummies(df['User'], drop_first=True, prefix=2)], axis=1, sort=False)
df = pd.concat([df, pd.get_dummies(df['number_of_container_conveyor'], drop_first=True, prefix=3)], axis=1, sort=False)
df = pd.concat([df, pd.get_dummies(df['last_station_served_by_user'], drop_first=True, prefix=3)], axis=1, sort=False)
df = df.drop(['SKU', 'User', 'number_of_container_conveyor', 'last_station_served_by_user'], axis=1)
target = df.Pick_Time
del df['Pick_Time']
X_train, X_test, y_train, y_test = train_test_split(df, target, test_size=0.3, shuffle=False)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from xgboost import XGBRegressor
from catboost import CatBoostRegressor
from sklearn.linear_model import Lasso, Ridge, BayesianRidge, ElasticNet, HuberRegressor, LinearRegression, LogisticRegression, SGDRegressor
from sklearn.metrics import mean_squared_error
import numpy as np
import warnings
import keras
warnings.filterwarnings('ignore')
model = CatBoostRegressor(logging_level='Silent')
model.fit(X_train, y_train)
predictions = model.predict(X_test)
print('CatBoost', np.sqrt(mean_squared_error(y_test, predictions))) | code |
18157974/cell_20 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape
df.columns
df.shape
def remove_outlier(df_in, col_name):
q1 = df_in[col_name].quantile(0.25)
q3 = df_in[col_name].quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(df_in[col_name] > fence_low) & (df_in[col_name] < fence_high)]
return df_out
def remove_outlier_cat(df_in, group):
q1 = group.quantile(0.25)
q3 = group.quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(group > fence_low) & (group < fence_high)]
return df_out
for i in range(10):
df = remove_outlier(df, 'Pick_Time')
df.isnull().sum()
fig, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(df.corr(), annot=True, linewidths=0.5, ax=ax)
for i in np.unique(df['Actual_Quantity']):
    # evaluate IQR fences for Pick_Time within each Actual_Quantity group
    subset = df[df['Actual_Quantity'] == i]
    remove_outlier_cat(subset, subset['Pick_Time'])
df.columns
df['day'].nunique() | code |
18157974/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape
df.columns
df.shape | code |
18157974/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape
df.columns
df.shape
def remove_outlier(df_in, col_name):
q1 = df_in[col_name].quantile(0.25)
q3 = df_in[col_name].quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(df_in[col_name] > fence_low) & (df_in[col_name] < fence_high)]
return df_out
def remove_outlier_cat(df_in, group):
q1 = group.quantile(0.25)
q3 = group.quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(group > fence_low) & (group < fence_high)]
return df_out
for i in range(10):
df = remove_outlier(df, 'Pick_Time')
df.isnull().sum()
fig, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(df.corr(), annot=True, linewidths=0.5, ax=ax)
for i in np.unique(df['Actual_Quantity']):
    # evaluate IQR fences for Pick_Time within each Actual_Quantity group
    subset = df[df['Actual_Quantity'] == i]
    remove_outlier_cat(subset, subset['Pick_Time'])
df.columns | code |
18157974/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers import LSTM
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
from xgboost import XGBClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from keras.callbacks import EarlyStopping
import math | code |
18157974/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape
df.columns
df.shape
def remove_outlier(df_in, col_name):
q1 = df_in[col_name].quantile(0.25)
q3 = df_in[col_name].quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(df_in[col_name] > fence_low) & (df_in[col_name] < fence_high)]
return df_out
def remove_outlier_cat(df_in, group):
q1 = group.quantile(0.25)
q3 = group.quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(group > fence_low) & (group < fence_high)]
return df_out
for i in range(10):
df = remove_outlier(df, 'Pick_Time')
df.isnull().sum()
fig, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(df.corr(), annot=True, linewidths=0.5, ax=ax)
for i in np.unique(df['Actual_Quantity']):
    # evaluate IQR fences for Pick_Time within each Actual_Quantity group
    subset = df[df['Actual_Quantity'] == i]
    remove_outlier_cat(subset, subset['Pick_Time'])
sns.scatterplot(x='Actual_Quantity', y='Pick_Time', data=df) | code |
18157974/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape
df.columns
df.shape
def remove_outlier(df_in, col_name):
q1 = df_in[col_name].quantile(0.25)
q3 = df_in[col_name].quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(df_in[col_name] > fence_low) & (df_in[col_name] < fence_high)]
return df_out
def remove_outlier_cat(df_in, group):
q1 = group.quantile(0.25)
q3 = group.quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(group > fence_low) & (group < fence_high)]
return df_out
for i in range(10):
df = remove_outlier(df, 'Pick_Time')
df.isnull().sum()
fig, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(df.corr(), annot=True, linewidths=0.5, ax=ax)
for i in np.unique(df['Actual_Quantity']):
    # evaluate IQR fences for Pick_Time within each Actual_Quantity group
    subset = df[df['Actual_Quantity'] == i]
    remove_outlier_cat(subset, subset['Pick_Time']) | code |
18157974/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape
df.columns
df.shape
def remove_outlier(df_in, col_name):
q1 = df_in[col_name].quantile(0.25)
q3 = df_in[col_name].quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(df_in[col_name] > fence_low) & (df_in[col_name] < fence_high)]
return df_out
def remove_outlier_cat(df_in, group):
q1 = group.quantile(0.25)
q3 = group.quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(group > fence_low) & (group < fence_high)]
return df_out
for i in range(10):
df = remove_outlier(df, 'Pick_Time')
df.isnull().sum()
fig, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(df.corr(), annot=True, linewidths=0.5, ax=ax)
for i in np.unique(df['Actual_Quantity']):
    # evaluate IQR fences for Pick_Time within each Actual_Quantity group
    subset = df[df['Actual_Quantity'] == i]
    remove_outlier_cat(subset, subset['Pick_Time'])
df | code |
18157974/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape | code |
18157974/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape
df.columns
df.shape
def remove_outlier(df_in, col_name):
q1 = df_in[col_name].quantile(0.25)
q3 = df_in[col_name].quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(df_in[col_name] > fence_low) & (df_in[col_name] < fence_high)]
return df_out
def remove_outlier_cat(df_in, group):
q1 = group.quantile(0.25)
q3 = group.quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(group > fence_low) & (group < fence_high)]
return df_out
for i in range(10):
df = remove_outlier(df, 'Pick_Time')
df.isnull().sum()
fig, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(df.corr(), annot=True, linewidths=0.5, ax=ax)
for i in np.unique(df['Actual_Quantity']):
    # evaluate IQR fences for Pick_Time within each Actual_Quantity group
    subset = df[df['Actual_Quantity'] == i]
    remove_outlier_cat(subset, subset['Pick_Time'])
sns.boxplot(x='Actual_Quantity', y='Pick_Time', data=df) | code |
18157974/cell_31 | [
"text_plain_output_1.png"
] | from catboost import CatBoostRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Lasso, Ridge, BayesianRidge, ElasticNet, HuberRegressor, LinearRegression, LogisticRegression, SGDRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import warnings
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape
df.columns
df.shape
def remove_outlier(df_in, col_name):
q1 = df_in[col_name].quantile(0.25)
q3 = df_in[col_name].quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(df_in[col_name] > fence_low) & (df_in[col_name] < fence_high)]
return df_out
def remove_outlier_cat(df_in, group):
q1 = group.quantile(0.25)
q3 = group.quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(group > fence_low) & (group < fence_high)]
return df_out
for i in range(10):
df = remove_outlier(df, 'Pick_Time')
df.isnull().sum()
fig, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(df.corr(), annot=True, linewidths=0.5, ax=ax)
for i in np.unique(df['Actual_Quantity']):
    # evaluate IQR fences for Pick_Time within each Actual_Quantity group
    subset = df[df['Actual_Quantity'] == i]
    remove_outlier_cat(subset, subset['Pick_Time'])
df.columns
hr = []
minutes = []
seconds = []
for i in range(len(df['Start_Time_of_Picking'])):
hr.append(int(df.iloc[i]['Start_Time_of_Picking'].split(':')[0]))
a, b = df.iloc[i]['Start_Time_of_Picking'].split(':')[1].split('.')
minutes.append(int(a))
seconds.append(int(b))
df = pd.concat([df, pd.get_dummies(df['SKU'], drop_first=True, prefix=1)], axis=1, sort=False)
df = pd.concat([df, pd.get_dummies(df['User'], drop_first=True, prefix=2)], axis=1, sort=False)
df = pd.concat([df, pd.get_dummies(df['number_of_container_conveyor'], drop_first=True, prefix=3)], axis=1, sort=False)
df = pd.concat([df, pd.get_dummies(df['last_station_served_by_user'], drop_first=True, prefix=3)], axis=1, sort=False)
df = df.drop(['SKU', 'User', 'number_of_container_conveyor', 'last_station_served_by_user'], axis=1)
target = df.Pick_Time
del df['Pick_Time']
X_train, X_test, y_train, y_test = train_test_split(df, target, test_size=0.3, shuffle=False)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from xgboost import XGBRegressor
from catboost import CatBoostRegressor
from sklearn.linear_model import Lasso, Ridge, BayesianRidge, ElasticNet, HuberRegressor, LinearRegression, LogisticRegression, SGDRegressor
from sklearn.metrics import mean_squared_error
import numpy as np
import warnings
import keras
warnings.filterwarnings('ignore')
model = CatBoostRegressor(logging_level='Silent')
model.fit(X_train, y_train)
predictions = model.predict(X_test)
classifiers = [['DecisionTree :', DecisionTreeRegressor()], ['RandomForest :', RandomForestRegressor()], ['KNeighbours :', KNeighborsRegressor(n_neighbors=2)], ['AdaBoostClassifier :', AdaBoostRegressor()], ['GradientBoostingClassifier: ', GradientBoostingRegressor()], ['Xgboost: ', XGBRegressor()], ['CatBoost: ', CatBoostRegressor(logging_level='Silent')], ['Lasso: ', Lasso()], ['Ridge: ', Ridge()], ['BayesianRidge: ', BayesianRidge()], ['ElasticNet: ', ElasticNet()], ['HuberRegressor: ', HuberRegressor()]]
print('RMSE results...')
for name, classifier in classifiers:
classifier.fit(X_train, y_train)
predictions = classifier.predict(X_test)
print(name, np.sqrt(mean_squared_error(y_test, predictions))) | code |
18157974/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape
df.columns
df.shape
def remove_outlier(df_in, col_name):
q1 = df_in[col_name].quantile(0.25)
q3 = df_in[col_name].quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(df_in[col_name] > fence_low) & (df_in[col_name] < fence_high)]
return df_out
def remove_outlier_cat(df_in, group):
q1 = group.quantile(0.25)
q3 = group.quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(group > fence_low) & (group < fence_high)]
return df_out
for i in range(10):
df = remove_outlier(df, 'Pick_Time')
df.isnull().sum()
fig, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(df.corr(), annot=True, linewidths=0.5, ax=ax)
sns.boxplot(x='Actual_Quantity', y='Pick_Time', data=df) | code |
18157974/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape
df.columns
df.shape
def remove_outlier(df_in, col_name):
q1 = df_in[col_name].quantile(0.25)
q3 = df_in[col_name].quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(df_in[col_name] > fence_low) & (df_in[col_name] < fence_high)]
return df_out
def remove_outlier_cat(df_in, group):
q1 = group.quantile(0.25)
q3 = group.quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(group > fence_low) & (group < fence_high)]
return df_out
for i in range(10):
df = remove_outlier(df, 'Pick_Time')
df.isnull().sum()
fig, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(df.corr(), annot=True, linewidths=0.5, ax=ax)
for i in np.unique(df['Actual_Quantity']):
    # evaluate IQR fences for Pick_Time within each Actual_Quantity group
    subset = df[df['Actual_Quantity'] == i]
    remove_outlier_cat(subset, subset['Pick_Time'])
df.columns
df['total_quantity_of_items_in_container'].value_counts() | code |
18157974/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape
df.columns
df.shape
def remove_outlier(df_in, col_name):
q1 = df_in[col_name].quantile(0.25)
q3 = df_in[col_name].quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(df_in[col_name] > fence_low) & (df_in[col_name] < fence_high)]
return df_out
def remove_outlier_cat(df_in, group):
q1 = group.quantile(0.25)
q3 = group.quantile(0.75)
iqr = q3 - q1
fence_low = q1 - 1.5 * iqr
fence_high = q3 + 1.5 * iqr
df_out = df_in.loc[(group > fence_low) & (group < fence_high)]
return df_out
for i in range(10):
df = remove_outlier(df, 'Pick_Time')
df.isnull().sum()
fig, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(df.corr(), annot=True, linewidths=0.5, ax=ax) | code |
18157974/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/pick_time_warehouse_train.csv')
df.shape
df.columns
df['Pick_Time'].plot.box(grid=True) | code |
1008041/cell_1 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
from sklearn.ensemble import RandomForestClassifier
df = pd.read_json('../input/train.json')
df.head()
df.shape | code |
1008041/cell_3 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
from sklearn.ensemble import RandomForestClassifier
df = pd.read_json('../input/train.json')
df.shape
df = pd.read_json('../input/train.json')
df['num_feature'] = df['features'].apply(len)
df['num_photos'] = df['photos'].apply(len)
df['num_description_words'] = df['description'].apply(lambda x: len(x.split(' ')))
interest_num = {'low': 0, 'medium': 0.5, 'high': 1}
df['interest_num'] = df['interest_level'].apply(lambda x: interest_num[x])
choose_feature = ['bathrooms', 'bedrooms', 'price', 'num_feature', 'num_description_words', 'interest_num']
print(df[df.isnull()]) | code |
74040768/cell_21 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error
path = '../input/pizza-price-prediction/pizza_v1.csv'
df = pd.read_csv(path)
correlation = df.corr()
plt.figure(figsize=(15, 8))
sns.heatmap(correlation, cbar=True, square=True, fmt='.1f', annot=True, annot_kws={'size': 8}, cmap='YlGnBu')
plt.show() | code |
74040768/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
path = '../input/pizza-price-prediction/pizza_v1.csv'
df = pd.read_csv(path)
def value_counts(data):
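    # stub: the original cell body (presumably per-column value_counts output) was not captured in this record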
pass
value_counts(df) | code |
74040768/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
path = '../input/pizza-price-prediction/pizza_v1.csv'
df = pd.read_csv(path)
df.head() | code |
74040768/cell_25 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error
path = '../input/pizza-price-prediction/pizza_v1.csv'
df = pd.read_csv(path)
correlation = df.corr()
X = df.drop(['price_rupiah'], axis=1)
y = df['price_rupiah']
X.head() | code |
74040768/cell_30 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import numpy as np
def run_model(model, X_train, X_test, y_train, y_test):
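    # fit the model, then compute train/test R^2 and test RMSE (presumably printed in the original cell)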
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
train_accuracy = model.score(X_train, y_train)
test_accuracy = model.score(X_test, y_test)
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
lr = LinearRegression()
run_model(lr, X_train, X_test, y_train, y_test) | code |
74040768/cell_33 | [
"image_output_1.png"
] | from sklearn.metrics import mean_squared_error
from sklearn.neighbors import KNeighborsRegressor
import numpy as np
def run_model(model, X_train, X_test, y_train, y_test):
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
train_accuracy = model.score(X_train, y_train)
test_accuracy = model.score(X_test, y_test)
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
k_values = [1, 5, 10]
for n in k_values:
model = KNeighborsRegressor(n_neighbors=n)
run_model(model, X_train, X_test, y_train, y_test)
print()
print('The Number of neighbors is : {}'.format(n))
print()
print('--------------------------------')
print() | code |
74040768/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd
path = '../input/pizza-price-prediction/pizza_v1.csv'
df = pd.read_csv(path)
df.head() | code |
74040768/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
path = '../input/pizza-price-prediction/pizza_v1.csv'
df = pd.read_csv(path)
df.head() | code |
74040768/cell_39 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
import numpy as np
def run_model(model, X_train, X_test, y_train, y_test):
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
train_accuracy = model.score(X_train, y_train)
test_accuracy = model.score(X_test, y_test)
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
k_values = [1, 5, 10]
for n in k_values:
model = KNeighborsRegressor(n_neighbors=n)
run_model(model, X_train, X_test, y_train, y_test)
model = DecisionTreeRegressor()
run_model(model, X_train, X_test, y_train, y_test)
trees = [10, 50, 100, 200, 500]
for n in trees:
model = RandomForestRegressor(n_estimators=n)
run_model(model, X_train, X_test, y_train, y_test)
print()
print('The Number of estimators is : {}'.format(n))
print()
print('--------------------------------')
print() | code |