path (string, length 13-17) | screenshot_names (sequence, length 1-873) | code (string, length 0-40.4k) | cell_type (string, 1 class) |
---|---|---|---|
122262213/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.tree import DecisionTreeClassifier, plot_tree, export_text
import pandas as pd
df = pd.read_csv('/kaggle/input/iris-flower-dataset/IRIS.csv')
df.columns
y = df['species']
X = df.drop(['species'], axis=1)
from sklearn.model_selection import train_test_split
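# hold out 20% of the rows for testing; no random_state is set, so the split (and accuracy) varies between runs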
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
prediction = clf.predict(X_test)
accuracy_score(y_test, prediction)
from sklearn.model_selection import cross_val_score
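# 10-fold cross-validation on the full dataset gives a more stable accuracy estimate than a single split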
scores = cross_val_score(clf, X, y, scoring='accuracy', cv=10)
scores.mean() | code |
73097582/cell_34 | [
"text_plain_output_1.png"
] | import glob
import numpy as np
import os
import pandas as pd
import scipy.stats as stats
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
def explore_stats(df):
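# profile each column of df: dtype, distinct/missing/negative/zero counts (with percentages), plus describe() stats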
nrows, ncols = df.shape
print("Total records:", nrows)
print("Total columns:", ncols)
# create columns list and check dtype
feature = []
type_lst = []
for key, value in df.dtypes.items():  # Series.iteritems() was removed in pandas 2.0
feature.append(key)
type_lst.append(value)
# check distinct value
distinct = []
for i in df.columns:
num_distinct = df[i].unique().size
distinct_pct = num_distinct / nrows * 100
distinct.append("{} ({:0.2f}%)".format(num_distinct, distinct_pct))
# check null values
null = []
for i in df.columns:
num_null = df[i].isna().sum()
null_pct = num_null / nrows * 100
null.append("{} ({:0.2f}%)".format(num_null, null_pct))
# check negative values
negative = []
for i in df.columns:
try:
num_neg = (df[i].astype('float') < 0).sum()
neg_pct = num_neg / nrows * 100
negative.append("{} ({:0.2f}%)".format(num_neg, neg_pct))
except:
negative.append(str(0) + " (0%)")
continue
# check zeros
zeros = []
for i in df.columns:
try:
num_zero = (df[i] == 0).sum()
zero_pct = num_zero / nrows * 100
zeros.append("{} ({:0.2f}%)".format(num_zero, zero_pct))
except:
zeros.append(str(0) + " (0%)")
continue
# check stats measure
stats = df.describe().transpose()
# put measures into a dataframe
data = {'feature': feature,
'data_type': type_lst,
'n_distinct': distinct,
'n_missing': null,
'n_negative': negative,
'n_zeros': zeros}
for y in stats.columns:
data[y] = []
for x in df.columns:
try:
data[y].append(stats.loc[x, y])
except:
data[y].append(0.0)
df_stats = pd.DataFrame(data)
return df_stats
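# derive a season label from starting_month with np.select (the notebook tags Jan-Mar as 'spring')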
conditions = [merged_df['starting_month'] <= 3, (merged_df['starting_month'] >= 4) & (merged_df['starting_month'] <= 6), (merged_df['starting_month'] >= 7) & (merged_df['starting_month'] <= 9), (merged_df['starting_month'] >= 10) & (merged_df['starting_month'] <= 12)]
values = ['spring', 'summer', 'autumn', 'winter']
merged_df['season'] = np.select(conditions, values)
explore_stats(merged_df.loc[:, 'ride_duration':'season'])
merged_df_v2 = merged_df[merged_df['ride_duration'] > 0]
merged_df_v2.shape
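# Tukey's rule: values beyond 1.5 * IQR from the quartiles count as outliers; only the upper bound is used to filter below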
duration_data = sorted(merged_df_v2['ride_duration'])
q1 = np.percentile(duration_data, 25)
q3 = np.percentile(duration_data, 75)
iqr = q3 - q1
lower_bound = q1 - 1.5 * iqr
upper_bound = q3 + 1.5 * iqr
merged_df_v3 = merged_df_v2[merged_df_v2['ride_duration'] < upper_bound]
merged_df_v4 = merged_df_v3.dropna(axis=0, how='any', subset=['start_station_name', 'end_station_name'])
explore_stats(merged_df_v4).sort_values(by=['data_type'])
used_df = merged_df_v4.drop(['start_station_id', 'end_station_id'], axis=1)
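# compare ride-duration distributions for casual riders vs members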
used_df.boxplot(column=['ride_duration'], by=['member_casual'], figsize=(10, 6)) | code |
73097582/cell_30 | [
"text_plain_output_1.png"
] | import glob
import numpy as np
import os
import pandas as pd
import scipy.stats as stats
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
def explore_stats(df):
nrows, ncols = df.shape
print("Total records:", nrows)
print("Total columns:", ncols)
# create columns list and check dtype
feature = []
type_lst = []
for key, value in df.dtypes.items():  # Series.iteritems() was removed in pandas 2.0
feature.append(key)
type_lst.append(value)
# check distinct value
distinct = []
for i in df.columns:
num_distinct = df[i].unique().size
distinct_pct = num_distinct / nrows * 100
distinct.append("{} ({:0.2f}%)".format(num_distinct, distinct_pct))
# check null values
null = []
for i in df.columns:
num_null = df[i].isna().sum()
null_pct = num_null / nrows * 100
null.append("{} ({:0.2f}%)".format(num_null, null_pct))
# check negative values
negative = []
for i in df.columns:
try:
num_neg = (df[i].astype('float') < 0).sum()
neg_pct = num_neg / nrows * 100
negative.append("{} ({:0.2f}%)".format(num_neg, neg_pct))
except:
negative.append(str(0) + " (0%)")
continue
# check zeros
zeros = []
for i in df.columns:
try:
num_zero = (df[i] == 0).sum()
zero_pct = num_zero / nrows * 100
zeros.append("{} ({:0.2f}%)".format(num_zero, zero_pct))
except:
zeros.append(str(0) + " (0%)")
continue
# check stats measure
stats = df.describe().transpose()
# put measures into a dataframe
data = {'feature': feature,
'data_type': type_lst,
'n_distinct': distinct,
'n_missing': null,
'n_negative': negative,
'n_zeros': zeros}
for y in stats.columns:
data[y] = []
for x in df.columns:
try:
data[y].append(stats.loc[x, y])
except:
data[y].append(0.0)
df_stats = pd.DataFrame(data)
return df_stats
conditions = [merged_df['starting_month'] <= 3, (merged_df['starting_month'] >= 4) & (merged_df['starting_month'] <= 6), (merged_df['starting_month'] >= 7) & (merged_df['starting_month'] <= 9), (merged_df['starting_month'] >= 10) & (merged_df['starting_month'] <= 12)]
values = ['spring', 'summer', 'autumn', 'winter']
merged_df['season'] = np.select(conditions, values)
explore_stats(merged_df.loc[:, 'ride_duration':'season'])
merged_df_v2 = merged_df[merged_df['ride_duration'] > 0]
merged_df_v2.shape
duration_data = sorted(merged_df_v2['ride_duration'])
q1 = np.percentile(duration_data, 25)
q3 = np.percentile(duration_data, 75)
iqr = q3 - q1
lower_bound = q1 - 1.5 * iqr
upper_bound = q3 + 1.5 * iqr
merged_df_v3 = merged_df_v2[merged_df_v2['ride_duration'] < upper_bound]
merged_df_v4 = merged_df_v3.dropna(axis=0, how='any', subset=['start_station_name', 'end_station_name'])
explore_stats(merged_df_v4).sort_values(by=['data_type'])
used_df = merged_df_v4.drop(['start_station_id', 'end_station_id'], axis=1)
used_df.info() | code |
73097582/cell_33 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import glob
import numpy as np
import os
import pandas as pd
import scipy.stats as stats
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
def explore_stats(df):
nrows, ncols = df.shape
print("Total records:", nrows)
print("Total columns:", ncols)
# create columns list and check dtype
feature = []
type_lst = []
for key, value in df.dtypes.items():  # Series.iteritems() was removed in pandas 2.0
feature.append(key)
type_lst.append(value)
# check distinct value
distinct = []
for i in df.columns:
num_distinct = df[i].unique().size
distinct_pct = num_distinct / nrows * 100
distinct.append("{} ({:0.2f}%)".format(num_distinct, distinct_pct))
# check null values
null = []
for i in df.columns:
num_null = df[i].isna().sum()
null_pct = num_null / nrows * 100
null.append("{} ({:0.2f}%)".format(num_null, null_pct))
# check negative values
negative = []
for i in df.columns:
try:
num_neg = (df[i].astype('float') < 0).sum()
neg_pct = num_neg / nrows * 100
negative.append("{} ({:0.2f}%)".format(num_neg, neg_pct))
except:
negative.append(str(0) + " (0%)")
continue
# check zeros
zeros = []
for i in df.columns:
try:
num_zero = (df[i] == 0).sum()
zero_pct = num_zero / nrows * 100
zeros.append("{} ({:0.2f}%)".format(num_zero, zero_pct))
except:
zeros.append(str(0) + " (0%)")
continue
# check stats measure
stats = df.describe().transpose()
# put measures into a dataframe
data = {'feature': feature,
'data_type': type_lst,
'n_distinct': distinct,
'n_missing': null,
'n_negative': negative,
'n_zeros': zeros}
for y in stats.columns:
data[y] = []
for x in df.columns:
try:
data[y].append(stats.loc[x, y])
except:
data[y].append(0.0)
df_stats = pd.DataFrame(data)
return df_stats
conditions = [merged_df['starting_month'] <= 3, (merged_df['starting_month'] >= 4) & (merged_df['starting_month'] <= 6), (merged_df['starting_month'] >= 7) & (merged_df['starting_month'] <= 9), (merged_df['starting_month'] >= 10) & (merged_df['starting_month'] <= 12)]
values = ['spring', 'summer', 'autumn', 'winter']
merged_df['season'] = np.select(conditions, values)
explore_stats(merged_df.loc[:, 'ride_duration':'season'])
merged_df_v2 = merged_df[merged_df['ride_duration'] > 0]
merged_df_v2.shape
duration_data = sorted(merged_df_v2['ride_duration'])
q1 = np.percentile(duration_data, 25)
q3 = np.percentile(duration_data, 75)
iqr = q3 - q1
lower_bound = q1 - 1.5 * iqr
upper_bound = q3 + 1.5 * iqr
merged_df_v3 = merged_df_v2[merged_df_v2['ride_duration'] < upper_bound]
merged_df_v4 = merged_df_v3.dropna(axis=0, how='any', subset=['start_station_name', 'end_station_name'])
explore_stats(merged_df_v4).sort_values(by=['data_type'])
used_df = merged_df_v4.drop(['start_station_id', 'end_station_id'], axis=1)
used_df[['member_casual', 'ride_duration']].groupby(by=['member_casual']).describe(percentiles=[0.01, 0.05, 0.25, 0.5, 0.75, 0.95, 0.99]) | code |
73097582/cell_44 | [
"text_plain_output_1.png"
] | import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import scipy.stats as stats
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
def explore_stats(df):
nrows, ncols = df.shape
print("Total records:", nrows)
print("Total columns:", ncols)
# create columns list and check dtype
feature = []
type_lst = []
for key, value in df.dtypes.items():  # Series.iteritems() was removed in pandas 2.0
feature.append(key)
type_lst.append(value)
# check distinct value
distinct = []
for i in df.columns:
num_distinct = df[i].unique().size
distinct_pct = num_distinct / nrows * 100
distinct.append("{} ({:0.2f}%)".format(num_distinct, distinct_pct))
# check null values
null = []
for i in df.columns:
num_null = df[i].isna().sum()
null_pct = num_null / nrows * 100
null.append("{} ({:0.2f}%)".format(num_null, null_pct))
# check negative values
negative = []
for i in df.columns:
try:
num_neg = (df[i].astype('float') < 0).sum()
neg_pct = num_neg / nrows * 100
negative.append("{} ({:0.2f}%)".format(num_neg, neg_pct))
except:
negative.append(str(0) + " (0%)")
continue
# check zeros
zeros = []
for i in df.columns:
try:
num_zero = (df[i] == 0).sum()
zero_pct = num_zero / nrows * 100
zeros.append("{} ({:0.2f}%)".format(num_zero, zero_pct))
except:
zeros.append(str(0) + " (0%)")
continue
# check stats measure
stats = df.describe().transpose()
# put measures into a dataframe
data = {'feature': feature,
'data_type': type_lst,
'n_distinct': distinct,
'n_missing': null,
'n_negative': negative,
'n_zeros': zeros}
for y in stats.columns:
data[y] = []
for x in df.columns:
try:
data[y].append(stats.loc[x, y])
except:
data[y].append(0.0)
df_stats = pd.DataFrame(data)
return df_stats
merged_df['started_at'] = pd.to_datetime(merged_df['started_at'])
merged_df['ended_at'] = pd.to_datetime(merged_df['ended_at'])
merged_df['ride_duration'] = (merged_df['ended_at'] - merged_df['started_at']) / pd.Timedelta(minutes=1)
merged_df['ride_duration']
conditions = [merged_df['starting_month'] <= 3, (merged_df['starting_month'] >= 4) & (merged_df['starting_month'] <= 6), (merged_df['starting_month'] >= 7) & (merged_df['starting_month'] <= 9), (merged_df['starting_month'] >= 10) & (merged_df['starting_month'] <= 12)]
values = ['spring', 'summer', 'autumn', 'winter']
merged_df['season'] = np.select(conditions, values)
explore_stats(merged_df.loc[:, 'ride_duration':'season'])
merged_df_v2 = merged_df[merged_df['ride_duration'] > 0]
merged_df_v2.shape
duration_data = sorted(merged_df_v2['ride_duration'])
q1 = np.percentile(duration_data, 25)
q3 = np.percentile(duration_data, 75)
iqr = q3 - q1
lower_bound = q1 - 1.5 * iqr
upper_bound = q3 + 1.5 * iqr
merged_df_v3 = merged_df_v2[merged_df_v2['ride_duration'] < upper_bound]
merged_df_v4 = merged_df_v3.dropna(axis=0, how='any', subset=['start_station_name', 'end_station_name'])
explore_stats(merged_df_v4).sort_values(by=['data_type'])
used_df = merged_df_v4.drop(['start_station_id', 'end_station_id'], axis=1)
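# rides per starting hour for each rider type, normalized to within-group shares so the two groups are comparable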
num_ride_by_hour = pd.pivot_table(used_df, values='ride_id', index=['starting_hour'], columns=['member_casual'], aggfunc='count')
num_ride_by_hour['pct_col_casual'] = num_ride_by_hour['casual'] / num_ride_by_hour['casual'].sum()
num_ride_by_hour['pct_col_member'] = num_ride_by_hour['member'] / num_ride_by_hour['member'].sum()
fig, ax = plt.subplots()
x = np.arange(len(num_ride_by_hour.index))
width = 0.4
casual = ax.bar(x-width/2, num_ride_by_hour['pct_col_casual'], width=width, label='Casual')
member = ax.bar(x+width/2, num_ride_by_hour['pct_col_member'], width=width, label='Member')
plt.xticks(x, num_ride_by_hour.index)
ax.legend()
plt.show()
num_ride_by_weekday = pd.pivot_table(used_df, values='ride_id', index=['starting_weekday'], columns=['member_casual'], aggfunc='count')
num_ride_by_weekday['pct_col_casual'] = num_ride_by_weekday['casual'] / num_ride_by_weekday['casual'].sum()
num_ride_by_weekday['pct_col_member'] = num_ride_by_weekday['member'] / num_ride_by_weekday['member'].sum()
fig, ax = plt.subplots()
x = np.arange(len(num_ride_by_weekday.index))
width = 0.4
casual = ax.bar(x - width / 2, num_ride_by_weekday['pct_col_casual'], width=width, label='Casual')
member = ax.bar(x + width / 2, num_ride_by_weekday['pct_col_member'], width=width, label='Member')
plt.xticks(x, num_ride_by_weekday.index)
ax.legend()
plt.show() | code |
73097582/cell_20 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import glob
import os
import pandas as pd
import scipy.stats as stats
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
def explore_stats(df):
nrows, ncols = df.shape
print("Total records:", nrows)
print("Total columns:", ncols)
# create columns list and check dtype
feature = []
type_lst = []
for key, value in df.dtypes.items():  # Series.iteritems() was removed in pandas 2.0
feature.append(key)
type_lst.append(value)
# check distinct value
distinct = []
for i in df.columns:
num_distinct = df[i].unique().size
distinct_pct = num_distinct / nrows * 100
distinct.append("{} ({:0.2f}%)".format(num_distinct, distinct_pct))
# check null values
null = []
for i in df.columns:
num_null = df[i].isna().sum()
null_pct = num_null / nrows * 100
null.append("{} ({:0.2f}%)".format(num_null, null_pct))
# check negative values
negative = []
for i in df.columns:
try:
num_neg = (df[i].astype('float') < 0).sum()
neg_pct = num_neg / nrows * 100
negative.append("{} ({:0.2f}%)".format(num_neg, neg_pct))
except:
negative.append(str(0) + " (0%)")
continue
# check zeros
zeros = []
for i in df.columns:
try:
num_zero = (df[i] == 0).sum()
zero_pct = num_zero / nrows * 100
zeros.append("{} ({:0.2f}%)".format(num_zero, zero_pct))
except:
zeros.append(str(0) + " (0%)")
continue
# check stats measure
stats = df.describe().transpose()
# put measures into a dataframe
data = {'feature': feature,
'data_type': type_lst,
'n_distinct': distinct,
'n_missing': null,
'n_negative': negative,
'n_zeros': zeros}
for y in stats.columns:
data[y] = []
for x in df.columns:
try:
data[y].append(stats.loc[x, y])
except:
data[y].append(0.0)
df_stats = pd.DataFrame(data)
return df_stats
explore_stats(merged_df.loc[:, 'ride_duration':'season']) | code |
73097582/cell_6 | [
"text_plain_output_1.png"
] | import glob
import os
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files | code |
73097582/cell_40 | [
"text_html_output_1.png"
] | import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import scipy.stats as stats
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
def explore_stats(df):
nrows, ncols = df.shape
print("Total records:", nrows)
print("Total columns:", ncols)
# create columns list and check dtype
feature = []
type_lst = []
for key, value in df.dtypes.items():  # Series.iteritems() was removed in pandas 2.0
feature.append(key)
type_lst.append(value)
# check distinct value
distinct = []
for i in df.columns:
num_distinct = df[i].unique().size
distinct_pct = num_distinct / nrows * 100
distinct.append("{} ({:0.2f}%)".format(num_distinct, distinct_pct))
# check null values
null = []
for i in df.columns:
num_null = df[i].isna().sum()
null_pct = num_null / nrows * 100
null.append("{} ({:0.2f}%)".format(num_null, null_pct))
# check negative values
negative = []
for i in df.columns:
try:
num_neg = (df[i].astype('float') < 0).sum()
neg_pct = num_neg / nrows * 100
negative.append("{} ({:0.2f}%)".format(num_neg, neg_pct))
except:
negative.append(str(0) + " (0%)")
continue
# check zeros
zeros = []
for i in df.columns:
try:
num_zero = (df[i] == 0).sum()
zero_pct = num_zero / nrows * 100
zeros.append("{} ({:0.2f}%)".format(num_zero, zero_pct))
except:
zeros.append(str(0) + " (0%)")
continue
# check stats measure
stats = df.describe().transpose()
# put measures into a dataframe
data = {'feature': feature,
'data_type': type_lst,
'n_distinct': distinct,
'n_missing': null,
'n_negative': negative,
'n_zeros': zeros}
for y in stats.columns:
data[y] = []
for x in df.columns:
try:
data[y].append(stats.loc[x, y])
except:
data[y].append(0.0)
df_stats = pd.DataFrame(data)
return df_stats
merged_df['started_at'] = pd.to_datetime(merged_df['started_at'])
merged_df['ended_at'] = pd.to_datetime(merged_df['ended_at'])
merged_df['ride_duration'] = (merged_df['ended_at'] - merged_df['started_at']) / pd.Timedelta(minutes=1)
merged_df['ride_duration']
conditions = [merged_df['starting_month'] <= 3, (merged_df['starting_month'] >= 4) & (merged_df['starting_month'] <= 6), (merged_df['starting_month'] >= 7) & (merged_df['starting_month'] <= 9), (merged_df['starting_month'] >= 10) & (merged_df['starting_month'] <= 12)]
values = ['spring', 'summer', 'autumn', 'winter']
merged_df['season'] = np.select(conditions, values)
explore_stats(merged_df.loc[:, 'ride_duration':'season'])
merged_df_v2 = merged_df[merged_df['ride_duration'] > 0]
merged_df_v2.shape
duration_data = sorted(merged_df_v2['ride_duration'])
q1 = np.percentile(duration_data, 25)
q3 = np.percentile(duration_data, 75)
iqr = q3 - q1
lower_bound = q1 - 1.5 * iqr
upper_bound = q3 + 1.5 * iqr
merged_df_v3 = merged_df_v2[merged_df_v2['ride_duration'] < upper_bound]
merged_df_v4 = merged_df_v3.dropna(axis=0, how='any', subset=['start_station_name', 'end_station_name'])
explore_stats(merged_df_v4).sort_values(by=['data_type'])
used_df = merged_df_v4.drop(['start_station_id', 'end_station_id'], axis=1)
num_ride_by_hour = pd.pivot_table(used_df, values='ride_id', index=['starting_hour'], columns=['member_casual'], aggfunc='count')
num_ride_by_hour['pct_col_casual'] = num_ride_by_hour['casual'] / num_ride_by_hour['casual'].sum()
num_ride_by_hour['pct_col_member'] = num_ride_by_hour['member'] / num_ride_by_hour['member'].sum()
fig, ax = plt.subplots()
x = np.arange(len(num_ride_by_hour.index))
width = 0.4
casual = ax.bar(x - width / 2, num_ride_by_hour['pct_col_casual'], width=width, label='Casual')
member = ax.bar(x + width / 2, num_ride_by_hour['pct_col_member'], width=width, label='Member')
plt.xticks(x, num_ride_by_hour.index)
ax.legend()
plt.show() | code |
73097582/cell_41 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import scipy.stats as stats
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
def explore_stats(df):
nrows, ncols = df.shape
print("Total records:", nrows)
print("Total columns:", ncols)
# create columns list and check dtype
feature = []
type_lst = []
for key, value in df.dtypes.items():  # Series.iteritems() was removed in pandas 2.0
feature.append(key)
type_lst.append(value)
# check distinct value
distinct = []
for i in df.columns:
num_distinct = df[i].unique().size
distinct_pct = num_distinct / nrows * 100
distinct.append("{} ({:0.2f}%)".format(num_distinct, distinct_pct))
# check null values
null = []
for i in df.columns:
num_null = df[i].isna().sum()
null_pct = num_null / nrows * 100
null.append("{} ({:0.2f}%)".format(num_null, null_pct))
# check negative values
negative = []
for i in df.columns:
try:
num_neg = (df[i].astype('float') < 0).sum()
neg_pct = num_neg / nrows * 100
negative.append("{} ({:0.2f}%)".format(num_neg, neg_pct))
except:
negative.append(str(0) + " (0%)")
continue
# check zeros
zeros = []
for i in df.columns:
try:
num_zero = (df[i] == 0).sum()
zero_pct = num_zero / nrows * 100
zeros.append("{} ({:0.2f}%)".format(num_zero, zero_pct))
except:
zeros.append(str(0) + " (0%)")
continue
# check stats measure
stats = df.describe().transpose()
# put measures into a dataframe
data = {'feature': feature,
'data_type': type_lst,
'n_distinct': distinct,
'n_missing': null,
'n_negative': negative,
'n_zeros': zeros}
for y in stats.columns:
data[y] = []
for x in df.columns:
try:
data[y].append(stats.loc[x, y])
except:
data[y].append(0.0)
df_stats = pd.DataFrame(data)
return df_stats
merged_df['started_at'] = pd.to_datetime(merged_df['started_at'])
merged_df['ended_at'] = pd.to_datetime(merged_df['ended_at'])
merged_df['ride_duration'] = (merged_df['ended_at'] - merged_df['started_at']) / pd.Timedelta(minutes=1)
merged_df['ride_duration']
conditions = [merged_df['starting_month'] <= 3, (merged_df['starting_month'] >= 4) & (merged_df['starting_month'] <= 6), (merged_df['starting_month'] >= 7) & (merged_df['starting_month'] <= 9), (merged_df['starting_month'] >= 10) & (merged_df['starting_month'] <= 12)]
values = ['spring', 'summer', 'autumn', 'winter']
merged_df['season'] = np.select(conditions, values)
explore_stats(merged_df.loc[:, 'ride_duration':'season'])
merged_df_v2 = merged_df[merged_df['ride_duration'] > 0]
merged_df_v2.shape
duration_data = sorted(merged_df_v2['ride_duration'])
q1 = np.percentile(duration_data, 25)
q3 = np.percentile(duration_data, 75)
iqr = q3 - q1
lower_bound = q1 - 1.5 * iqr
upper_bound = q3 + 1.5 * iqr
merged_df_v3 = merged_df_v2[merged_df_v2['ride_duration'] < upper_bound]
merged_df_v4 = merged_df_v3.dropna(axis=0, how='any', subset=['start_station_name', 'end_station_name'])
explore_stats(merged_df_v4).sort_values(by=['data_type'])
used_df = merged_df_v4.drop(['start_station_id', 'end_station_id'], axis=1)
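# Welch's two-sample t-test with the member durations shifted up by i minutes, probing the size of the casual-member gap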
a = used_df[used_df['member_casual'] == 'member']['ride_duration']
b = used_df[used_df['member_casual'] == 'casual']['ride_duration']
for i in range(7):
result = stats.ttest_ind(a + i, b, equal_var=False, alternative='two-sided')
num_ride_by_hour = pd.pivot_table(used_df, values='ride_id', index=['starting_hour'], columns=['member_casual'], aggfunc='count')
num_ride_by_hour['pct_col_casual'] = num_ride_by_hour['casual'] / num_ride_by_hour['casual'].sum()
num_ride_by_hour['pct_col_member'] = num_ride_by_hour['member'] / num_ride_by_hour['member'].sum()
fig, ax = plt.subplots()
x = np.arange(len(num_ride_by_hour.index))
width = 0.4
casual = ax.bar(x-width/2, num_ride_by_hour['pct_col_casual'], width=width, label='Casual')
member = ax.bar(x+width/2, num_ride_by_hour['pct_col_member'], width=width, label='Member')
plt.xticks(x, num_ride_by_hour.index)
ax.legend()
plt.show()
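# chi-square test of independence on the hour-by-rider-type contingency table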
stat, p, dof, expected = stats.chi2_contingency(num_ride_by_hour[['casual', 'member']])
print('Chi-square statistic: ' + str(stat))
print('P-value: ' + str(p)) | code |
73097582/cell_11 | [
"text_plain_output_1.png"
] | import glob
import os
import pandas as pd
import scipy.stats as stats
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
def explore_stats(df):
nrows, ncols = df.shape
print("Total records:", nrows)
print("Total columns:", ncols)
# create columns list and check dtype
feature = []
type_lst = []
for key, value in df.dtypes.items():  # Series.iteritems() was removed in pandas 2.0
feature.append(key)
type_lst.append(value)
# check distinct value
distinct = []
for i in df.columns:
num_distinct = df[i].unique().size
distinct_pct = num_distinct / nrows * 100
distinct.append("{} ({:0.2f}%)".format(num_distinct, distinct_pct))
# check null values
null = []
for i in df.columns:
num_null = df[i].isna().sum()
null_pct = num_null / nrows * 100
null.append("{} ({:0.2f}%)".format(num_null, null_pct))
# check negative values
negative = []
for i in df.columns:
try:
num_neg = (df[i].astype('float') < 0).sum()
neg_pct = num_neg / nrows * 100
negative.append("{} ({:0.2f}%)".format(num_neg, neg_pct))
except:
negative.append(str(0) + " (0%)")
continue
# check zeros
zeros = []
for i in df.columns:
try:
num_zero = (df[i] == 0).sum()
zero_pct = num_zero / nrows * 100
zeros.append("{} ({:0.2f}%)".format(num_zero, zero_pct))
except:
zeros.append(str(0) + " (0%)")
continue
# check stats measure
stats = df.describe().transpose()
# put measures into a dataframe
data = {'feature': feature,
'data_type': type_lst,
'n_distinct': distinct,
'n_missing': null,
'n_negative': negative,
'n_zeros': zeros}
for y in stats.columns:
data[y] = []
for x in df.columns:
try:
data[y].append(stats.loc[x, y])
except:
data[y].append(0.0)
df_stats = pd.DataFrame(data)
return df_stats
data_dict = explore_stats(merged_df)
data_dict | code |
73097582/cell_7 | [
"image_output_1.png"
] | import glob
import os
import pandas as pd
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
merged_df.head() | code |
73097582/cell_8 | [
"text_plain_output_1.png"
] | import glob
import os
import pandas as pd
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
merged_df.tail() | code |
73097582/cell_16 | [
"text_html_output_1.png"
] | import glob
import os
import pandas as pd
import scipy.stats as stats
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
def explore_stats(df):
nrows, ncols = df.shape
print("Total records:", nrows)
print("Total columns:", ncols)
# create columns list and check dtype
feature = []
type_lst = []
for key, value in df.dtypes.items():  # Series.iteritems() was removed in pandas 2.0
feature.append(key)
type_lst.append(value)
# check distinct value
distinct = []
for i in df.columns:
num_distinct = df[i].unique().size
distinct_pct = num_distinct / nrows * 100
distinct.append("{} ({:0.2f}%)".format(num_distinct, distinct_pct))
# check null values
null = []
for i in df.columns:
num_null = df[i].isna().sum()
null_pct = num_null / nrows * 100
null.append("{} ({:0.2f}%)".format(num_null, null_pct))
# check negative values
negative = []
for i in df.columns:
try:
num_neg = (df[i].astype('float') < 0).sum()
neg_pct = num_neg / nrows * 100
negative.append("{} ({:0.2f}%)".format(num_neg, neg_pct))
except:
negative.append(str(0) + " (0%)")
continue
# check zeros
zeros = []
for i in df.columns:
try:
num_zero = (df[i] == 0).sum()
zero_pct = num_zero / nrows * 100
zeros.append("{} ({:0.2f}%)".format(num_zero, zero_pct))
except:
zeros.append(str(0) + " (0%)")
continue
# check stats measure
stats = df.describe().transpose()
# put measures into a dataframe
data = {'feature': feature,
'data_type': type_lst,
'n_distinct': distinct,
'n_missing': null,
'n_negative': negative,
'n_zeros': zeros}
for y in stats.columns:
data[y] = []
for x in df.columns:
try:
data[y].append(stats.loc[x, y])
except:
data[y].append(0.0)
df_stats = pd.DataFrame(data)
return df_stats
merged_df['started_at'] = pd.to_datetime(merged_df['started_at'])
merged_df['ended_at'] = pd.to_datetime(merged_df['ended_at'])
merged_df['ride_duration'] = (merged_df['ended_at'] - merged_df['started_at']) / pd.Timedelta(minutes=1)
merged_df['ride_duration'] | code |
73097582/cell_31 | [
"text_plain_output_1.png"
] | import glob
import numpy as np
import os
import pandas as pd
import scipy.stats as stats
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
def explore_stats(df):
nrows, ncols = df.shape
print("Total records:", nrows)
print("Total columns:", ncols)
# create columns list and check dtype
feature = []
type_lst = []
for key, value in df.dtypes.items():  # Series.iteritems() was removed in pandas 2.0
feature.append(key)
type_lst.append(value)
# check distinct value
distinct = []
for i in df.columns:
num_distinct = df[i].unique().size
distinct_pct = num_distinct / nrows * 100
distinct.append("{} ({:0.2f}%)".format(num_distinct, distinct_pct))
# check null values
null = []
for i in df.columns:
num_null = df[i].isna().sum()
null_pct = num_null / nrows * 100
null.append("{} ({:0.2f}%)".format(num_null, null_pct))
# check negative values
negative = []
for i in df.columns:
try:
num_neg = (df[i].astype('float') < 0).sum()
neg_pct = num_neg / nrows * 100
negative.append("{} ({:0.2f}%)".format(num_neg, neg_pct))
except:
negative.append(str(0) + " (0%)")
continue
# check zeros
zeros = []
for i in df.columns:
try:
num_zero = (df[i] == 0).sum()
zero_pct = num_zero / nrows * 100
zeros.append("{} ({:0.2f}%)".format(num_zero, zero_pct))
except:
zeros.append(str(0) + " (0%)")
continue
# check stats measure
stats = df.describe().transpose()
# put measures into a dataframe
data = {'feature': feature,
'data_type': type_lst,
'n_distinct': distinct,
'n_missing': null,
'n_negative': negative,
'n_zeros': zeros}
for y in stats.columns:
data[y] = []
for x in df.columns:
try:
data[y].append(stats.loc[x, y])
except:
data[y].append(0.0)
df_stats = pd.DataFrame(data)
return df_stats
conditions = [merged_df['starting_month'] <= 3, (merged_df['starting_month'] >= 4) & (merged_df['starting_month'] <= 6), (merged_df['starting_month'] >= 7) & (merged_df['starting_month'] <= 9), (merged_df['starting_month'] >= 10) & (merged_df['starting_month'] <= 12)]
values = ['spring', 'summer', 'autumn', 'winter']
merged_df['season'] = np.select(conditions, values)
explore_stats(merged_df.loc[:, 'ride_duration':'season'])
merged_df_v2 = merged_df[merged_df['ride_duration'] > 0]
merged_df_v2.shape
duration_data = sorted(merged_df_v2['ride_duration'])
q1 = np.percentile(duration_data, 25)
q3 = np.percentile(duration_data, 75)
iqr = q3 - q1
lower_bound = q1 - 1.5 * iqr
upper_bound = q3 + 1.5 * iqr
merged_df_v3 = merged_df_v2[merged_df_v2['ride_duration'] < upper_bound]
merged_df_v4 = merged_df_v3.dropna(axis=0, how='any', subset=['start_station_name', 'end_station_name'])
explore_stats(merged_df_v4).sort_values(by=['data_type'])
used_df = merged_df_v4.drop(['start_station_id', 'end_station_id'], axis=1)
used_df.describe() | code |
73097582/cell_24 | [
"text_plain_output_1.png"
] | import glob
import numpy as np
import os
import pandas as pd
import scipy.stats as stats
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
def explore_stats(df):
nrows, ncols = df.shape
print("Total records:", nrows)
print("Total columns:", ncols)
# create columns list and check dtype
feature = []
type_lst = []
for key, value in df.dtypes.items():  # Series.iteritems() was removed in pandas 2.0
feature.append(key)
type_lst.append(value)
# check distinct value
distinct = []
for i in df.columns:
num_distinct = df[i].unique().size
distinct_pct = num_distinct / nrows * 100
distinct.append("{} ({:0.2f}%)".format(num_distinct, distinct_pct))
# check null values
null = []
for i in df.columns:
num_null = df[i].isna().sum()
null_pct = num_null / nrows * 100
null.append("{} ({:0.2f}%)".format(num_null, null_pct))
# check negative values
negative = []
for i in df.columns:
try:
num_neg = (df[i].astype('float') < 0).sum()
neg_pct = num_neg / nrows * 100
negative.append("{} ({:0.2f}%)".format(num_neg, neg_pct))
except:
negative.append(str(0) + " (0%)")
continue
# check zeros
zeros = []
for i in df.columns:
try:
num_zero = (df[i] == 0).sum()
zero_pct = num_zero / nrows * 100
zeros.append("{} ({:0.2f}%)".format(num_zero, zero_pct))
except:
zeros.append(str(0) + " (0%)")
continue
# check stats measure
stats = df.describe().transpose()
# put measures into a dataframe
data = {'feature': feature,
'data_type': type_lst,
'n_distinct': distinct,
'n_missing': null,
'n_negative': negative,
'n_zeros': zeros}
for y in stats.columns:
data[y] = []
for x in df.columns:
try:
data[y].append(stats.loc[x, y])
except:
data[y].append(0.0)
df_stats = pd.DataFrame(data)
return df_stats
conditions = [merged_df['starting_month'] <= 3, (merged_df['starting_month'] >= 4) & (merged_df['starting_month'] <= 6), (merged_df['starting_month'] >= 7) & (merged_df['starting_month'] <= 9), (merged_df['starting_month'] >= 10) & (merged_df['starting_month'] <= 12)]
values = ['spring', 'summer', 'autumn', 'winter']
merged_df['season'] = np.select(conditions, values)
explore_stats(merged_df.loc[:, 'ride_duration':'season'])
merged_df_v2 = merged_df[merged_df['ride_duration'] > 0]
merged_df_v2.shape
duration_data = sorted(merged_df_v2['ride_duration'])
q1 = np.percentile(duration_data, 25)
q3 = np.percentile(duration_data, 75)
iqr = q3 - q1
lower_bound = q1 - 1.5 * iqr
upper_bound = q3 + 1.5 * iqr
print('Lower bound and upper bound are: ' + str(lower_bound) + ', ' + str(upper_bound))
print('Percentile of lower bound: ' + str(stats.percentileofscore(duration_data, lower_bound)))
print('Percentile of upper bound: ' + str(stats.percentileofscore(duration_data, upper_bound))) | code |
73097582/cell_14 | [
"text_html_output_1.png"
] | import glob
import os
import pandas as pd
import scipy.stats as stats
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
def explore_stats(df):
nrows, ncols = df.shape
print("Total records:", nrows)
print("Total columns:", ncols)
# create columns list and check dtype
feature = []
type_lst = []
for key, value in df.dtypes.items():  # Series.iteritems() was removed in pandas 2.0
feature.append(key)
type_lst.append(value)
# check distinct value
distinct = []
for i in df.columns:
num_distinct = df[i].unique().size
distinct_pct = num_distinct / nrows * 100
distinct.append("{} ({:0.2f}%)".format(num_distinct, distinct_pct))
# check null values
null = []
for i in df.columns:
num_null = df[i].isna().sum()
null_pct = num_null / nrows * 100
null.append("{} ({:0.2f}%)".format(num_null, null_pct))
# check negative values
negative = []
for i in df.columns:
try:
num_neg = (df[i].astype('float') < 0).sum()
neg_pct = num_neg / nrows * 100
negative.append("{} ({:0.2f}%)".format(num_neg, neg_pct))
except:
negative.append(str(0) + " (0%)")
continue
# check zeros
zeros = []
for i in df.columns:
try:
num_zero = (df[i] == 0).sum()
zero_pct = num_zero / nrows * 100
zeros.append("{} ({:0.2f}%)".format(num_zero, zero_pct))
except:
zeros.append(str(0) + " (0%)")
continue
# check stats measure
stats = df.describe().transpose()
# put measures into a dataframe
data = {'feature': feature,
'data_type': type_lst,
'n_distinct': distinct,
'n_missing': null,
'n_negative': negative,
'n_zeros': zeros}
for y in stats.columns:
data[y] = []
for x in df.columns:
try:
data[y].append(stats.loc[x, y])
except:
data[y].append(0.0)
df_stats = pd.DataFrame(data)
return df_stats
merged_df['started_at'] = pd.to_datetime(merged_df['started_at'])
merged_df['ended_at'] = pd.to_datetime(merged_df['ended_at'])
print(merged_df['started_at'].dtype)
print(merged_df['ended_at'].dtype) | code |
73097582/cell_22 | [
"text_plain_output_1.png"
] | import glob
import os
import pandas as pd
import scipy.stats as stats
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
def explore_stats(df):
nrows, ncols = df.shape
print("Total records:", nrows)
print("Total columns:", ncols)
# create columns list and check dtype
feature = []
type_lst = []
for key, value in df.dtypes.items():  # Series.iteritems() was removed in pandas 2.0
feature.append(key)
type_lst.append(value)
# check distinct value
distinct = []
for i in df.columns:
num_distinct = df[i].unique().size
distinct_pct = num_distinct / nrows * 100
distinct.append("{} ({:0.2f}%)".format(num_distinct, distinct_pct))
# check null values
null = []
for i in df.columns:
num_null = df[i].isna().sum()
null_pct = num_null / nrows * 100
null.append("{} ({:0.2f}%)".format(num_null, null_pct))
# check negative values
negative = []
for i in df.columns:
try:
num_neg = (df[i].astype('float') < 0).sum()
neg_pct = num_neg / nrows * 100
negative.append("{} ({:0.2f}%)".format(num_neg, neg_pct))
except:
negative.append(str(0) + " (0%)")
continue
# check zeros
zeros = []
for i in df.columns:
try:
num_zero = (df[i] == 0).sum()
zero_pct = num_zero / nrows * 100
zeros.append("{} ({:0.2f}%)".format(num_zero, zero_pct))
except:
zeros.append(str(0) + " (0%)")
continue
# check stats measure
stats = df.describe().transpose()
# put measures into a dataframe
data = {'feature': feature,
'data_type': type_lst,
'n_distinct': distinct,
'n_missing': null,
'n_negative': negative,
'n_zeros': zeros}
for y in stats.columns:
data[y] = []
for x in df.columns:
try:
data[y].append(stats.loc[x, y])
except:
data[y].append(0.0)
df_stats = pd.DataFrame(data)
return df_stats
explore_stats(merged_df.loc[:, 'ride_duration':'season'])
merged_df_v2 = merged_df[merged_df['ride_duration'] > 0]
merged_df_v2.shape | code |
73097582/cell_27 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import glob
import numpy as np
import os
import pandas as pd
import scipy.stats as stats
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
def explore_stats(df):
nrows, ncols = df.shape
print("Total records:", nrows)
print("Total columns:", ncols)
# create columns list and check dtype
feature = []
type_lst = []
for key, value in df.dtypes.items():  # Series.iteritems() was removed in pandas 2.0
feature.append(key)
type_lst.append(value)
# check distinct value
distinct = []
for i in df.columns:
num_distinct = df[i].unique().size
distinct_pct = num_distinct / nrows * 100
distinct.append("{} ({:0.2f}%)".format(num_distinct, distinct_pct))
# check null values
null = []
for i in df.columns:
num_null = df[i].isna().sum()
null_pct = num_null / nrows * 100
null.append("{} ({:0.2f}%)".format(num_null, null_pct))
# check negative values
negative = []
for i in df.columns:
try:
num_neg = (df[i].astype('float') < 0).sum()
neg_pct = num_neg / nrows * 100
negative.append("{} ({:0.2f}%)".format(num_neg, neg_pct))
except:
negative.append(str(0) + " (0%)")
continue
# check zeros
zeros = []
for i in df.columns:
try:
num_zero = (df[i] == 0).sum()
zero_pct = num_zero / nrows * 100
zeros.append("{} ({:0.2f}%)".format(num_zero, zero_pct))
except:
zeros.append(str(0) + " (0%)")
continue
# check stats measure
stats = df.describe().transpose()
# put measures into a dataframe
data = {'feature': feature,
'data_type': type_lst,
'n_distinct': distinct,
'n_missing': null,
'n_negative': negative,
'n_zeros': zeros}
for y in stats.columns:
data[y] = []
for x in df.columns:
try:
data[y].append(stats.loc[x, y])
except:
data[y].append(0.0)
df_stats = pd.DataFrame(data)
return df_stats
conditions = [merged_df['starting_month'] <= 3, (merged_df['starting_month'] >= 4) & (merged_df['starting_month'] <= 6), (merged_df['starting_month'] >= 7) & (merged_df['starting_month'] <= 9), (merged_df['starting_month'] >= 10) & (merged_df['starting_month'] <= 12)]
values = ['spring', 'summer', 'autumn', 'winter']
merged_df['season'] = np.select(conditions, values)
explore_stats(merged_df.loc[:, 'ride_duration':'season'])
merged_df_v2 = merged_df[merged_df['ride_duration'] > 0]
merged_df_v2.shape
duration_data = sorted(merged_df_v2['ride_duration'])
q1 = np.percentile(duration_data, 25)
q3 = np.percentile(duration_data, 75)
iqr = q3 - q1
lower_bound = q1 - 1.5 * iqr
upper_bound = q3 + 1.5 * iqr
merged_df_v3 = merged_df_v2[merged_df_v2['ride_duration'] < upper_bound]
merged_df_v4 = merged_df_v3.dropna(axis=0, how='any', subset=['start_station_name', 'end_station_name'])
explore_stats(merged_df_v4).sort_values(by=['data_type']) | code |
73097582/cell_36 | [
"text_html_output_1.png"
] | import glob
import numpy as np
import os
import pandas as pd
import scipy.stats as stats
path = '../input/cyclistic-trip-data'
all_files = glob.glob(os.path.join(path, '*.csv'))
all_files
# DataFrame.append was removed in pandas 2.0; concatenate all monthly CSVs in one call
merged_df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
def explore_stats(df):
nrows, ncols = df.shape
print("Total records:", nrows)
print("Total columns:", ncols)
# create columns list and check dtype
feature = []
type_lst = []
for key, value in df.dtypes.items():  # Series.iteritems() was removed in pandas 2.0
feature.append(key)
type_lst.append(value)
# check distinct value
distinct = []
for i in df.columns:
num_distinct = df[i].unique().size
distinct_pct = num_distinct / nrows * 100
distinct.append("{} ({:0.2f}%)".format(num_distinct, distinct_pct))
# check null values
null = []
for i in df.columns:
num_null = df[i].isna().sum()
null_pct = num_null / nrows * 100
null.append("{} ({:0.2f}%)".format(num_null, null_pct))
# check negative values
negative = []
for i in df.columns:
try:
num_neg = (df[i].astype('float') < 0).sum()
neg_pct = num_neg / nrows * 100
negative.append("{} ({:0.2f}%)".format(num_neg, neg_pct))
except:
negative.append(str(0) + " (0%)")
continue
# check zeros
zeros = []
for i in df.columns:
try:
num_zero = (df[i] == 0).sum()
zero_pct = num_zero / nrows * 100
zeros.append("{} ({:0.2f}%)".format(num_zero, zero_pct))
except:
zeros.append(str(0) + " (0%)")
continue
# check stats measure
stats = df.describe().transpose()
# put measures into a dataframe
data = {'feature': feature,
'data_type': type_lst,
'n_distinct': distinct,
'n_missing': null,
'n_negative': negative,
'n_zeros': zeros}
for y in stats.columns:
data[y] = []
for x in df.columns:
try:
data[y].append(stats.loc[x, y])
except:
data[y].append(0.0)
df_stats = pd.DataFrame(data)
return df_stats
conditions = [merged_df['starting_month'] <= 3, (merged_df['starting_month'] >= 4) & (merged_df['starting_month'] <= 6), (merged_df['starting_month'] >= 7) & (merged_df['starting_month'] <= 9), (merged_df['starting_month'] >= 10) & (merged_df['starting_month'] <= 12)]
values = ['spring', 'summer', 'autumn', 'winter']
merged_df['season'] = np.select(conditions, values)
explore_stats(merged_df.loc[:, 'ride_duration':'season'])
merged_df_v2 = merged_df[merged_df['ride_duration'] > 0]
merged_df_v2.shape
duration_data = sorted(merged_df_v2['ride_duration'])
q1 = np.percentile(duration_data, 25)
q3 = np.percentile(duration_data, 75)
iqr = q3 - q1
lower_bound = q1 - 1.5 * iqr
upper_bound = q3 + 1.5 * iqr
merged_df_v3 = merged_df_v2[merged_df_v2['ride_duration'] < upper_bound]
merged_df_v4 = merged_df_v3.dropna(axis=0, how='any', subset=['start_station_name', 'end_station_name'])
explore_stats(merged_df_v4).sort_values(by=['data_type'])
used_df = merged_df_v4.drop(['start_station_id', 'end_station_id'], axis=1)
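# Welch's two-sample t-test with the member durations shifted up by i minutes, probing the size of the casual-member gap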
a = used_df[used_df['member_casual'] == 'member']['ride_duration']
b = used_df[used_df['member_casual'] == 'casual']['ride_duration']
for i in range(7):
result = stats.ttest_ind(a + i, b, equal_var=False, alternative='two-sided')
print('Result of ttest with MEMBER smaller than CASUAL by ' + str(i))
print(result)
print('--------------------------------------------') | code |
49130544/cell_9 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/creditcard/creditcard.csv')
fraud = data[data['Class'] == 1]
normal = data[data['Class'] == 0]
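# side-by-side histograms of transaction amounts for fraud vs normal, log-scaled because fraud is rare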
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
f.suptitle('Amount per transaction by class')
bins = 50
ax1.hist(fraud.Amount, bins=bins)
ax1.set_title('Fraud')
ax2.hist(normal.Amount, bins=bins)
ax2.set_title('Normal')
plt.xlabel('Amount ($)')
plt.ylabel('Number of Transactions')
plt.xlim((0, 20000))
plt.yscale('log') | code |
49130544/cell_25 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import seaborn as sns
data = pd.read_csv('../input/creditcard/creditcard.csv')
fraud = data[data['Class'] == 1]
normal = data[data['Class'] == 0]
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
f.suptitle('Amount per transaction by class')
bins = 50
ax1.hist(fraud.Amount, bins = bins)
ax1.set_title('Fraud')
ax2.hist(normal.Amount, bins = bins)
ax2.set_title('Normal')
plt.xlabel('Amount ($)')
plt.ylabel('Number of Transactions')
plt.xlim((0, 20000))
plt.yscale('log')
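# work on a 10% sample so the outlier detectors stay tractable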
data1 = data.sample(frac=0.1, random_state=1)
Fraud = data1[data1['Class'] == 1]
Valid = data1[data1['Class'] == 0]
state = np.random.RandomState(42)
outlier_fraction = len(Fraud) / float(len(Valid))
state = np.random.RandomState(42)
## Correlation
import seaborn as sns
# get correlations of each feature in the dataset
corrmat = data1.corr()
top_corr_features = corrmat.index
plt.figure(figsize=(20, 20))
# plot heat map
g = sns.heatmap(data[top_corr_features].corr(), annot=True, cmap="RdYlGn")
columns = data1.columns.tolist()
columns = [c for c in columns if c not in ['Class']]
target = 'Class'
state = np.random.RandomState(42)
X = data1[columns]
Y = data1[target]
X_outliers = state.uniform(low=0, high=1, size=(X.shape[0], X.shape[1]))
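# contamination is set to the observed fraud-to-valid ratio so each detector flags a matching share of points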
classifiers = {'Isolation Forest': IsolationForest(n_estimators=100, max_samples=len(X), contamination=outlier_fraction, random_state=state, verbose=0), 'Local Outlier Factor': LocalOutlierFactor(n_neighbors=20, algorithm='auto', leaf_size=30, metric='minkowski', p=2, metric_params=None, contamination=outlier_fraction)}
type(classifiers) | code |
49130544/cell_23 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import seaborn as sns
data = pd.read_csv('../input/creditcard/creditcard.csv')
fraud = data[data['Class'] == 1]
normal = data[data['Class'] == 0]
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
f.suptitle('Amount per transaction by class')
bins = 50
ax1.hist(fraud.Amount, bins = bins)
ax1.set_title('Fraud')
ax2.hist(normal.Amount, bins = bins)
ax2.set_title('Normal')
plt.xlabel('Amount ($)')
plt.ylabel('Number of Transactions')
plt.xlim((0, 20000))
plt.yscale('log')
data1 = data.sample(frac=0.1, random_state=1)
Fraud = data1[data1['Class'] == 1]
Valid = data1[data1['Class'] == 0]
state = np.random.RandomState(42)
outlier_fraction = len(Fraud) / float(len(Valid))
state = np.random.RandomState(42)
## Correlation
import seaborn as sns
# get correlations of each feature in the dataset
corrmat = data1.corr()
top_corr_features = corrmat.index
plt.figure(figsize=(20, 20))
# plot heat map
g = sns.heatmap(data[top_corr_features].corr(), annot=True, cmap="RdYlGn")
columns = data1.columns.tolist()
columns = [c for c in columns if c not in ['Class']]
target = 'Class'
state = np.random.RandomState(42)
X = data1[columns]
Y = data1[target]
X_outliers = state.uniform(low=0, high=1, size=(X.shape[0], X.shape[1]))
print(X.shape)
print(Y.shape) | code |
49130544/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/creditcard/creditcard.csv')
data.head() | code |
49130544/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from statistics import mean
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import seaborn as sns
data = pd.read_csv('../input/creditcard/creditcard.csv')
fraud = data[data['Class'] == 1]
normal = data[data['Class'] == 0]
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
f.suptitle('Amount per transaction by class')
bins = 50
ax1.hist(fraud.Amount, bins = bins)
ax1.set_title('Fraud')
ax2.hist(normal.Amount, bins = bins)
ax2.set_title('Normal')
plt.xlabel('Amount ($)')
plt.ylabel('Number of Transactions')
plt.xlim((0, 20000))
plt.yscale('log')
data1 = data.sample(frac=0.1, random_state=1)
Fraud = data1[data1['Class'] == 1]
Valid = data1[data1['Class'] == 0]
state = np.random.RandomState(42)
outlier_fraction = len(Fraud) / float(len(Valid))
state = np.random.RandomState(42)
## Correlation
import seaborn as sns
# get correlations of each feature in the dataset
corrmat = data1.corr()
top_corr_features = corrmat.index
plt.figure(figsize=(20, 20))
# plot heat map
g = sns.heatmap(data[top_corr_features].corr(), annot=True, cmap="RdYlGn")
xs = data.V2
ys = data.Amount
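# least squares fit by hand: slope from cov(x, y) / var(x), written with sample means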
def line_of_best(xs, ys):
m = (mean(xs) * mean(ys) - mean(xs * ys)) / (mean(xs) * mean(xs) - mean(xs * xs))
b = mean(ys) - m * mean(xs)
return (m, b)
m, b = line_of_best(xs, ys)
regression_line = [m * x + b for x in xs]
plt.scatter(xs, ys)
plt.plot(xs, regression_line) | code |
49130544/cell_16 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import seaborn as sns
data = pd.read_csv('../input/creditcard/creditcard.csv')
fraud = data[data['Class'] == 1]
normal = data[data['Class'] == 0]
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
f.suptitle('Amount per transaction by class')
bins = 50
ax1.hist(fraud.Amount, bins = bins)
ax1.set_title('Fraud')
ax2.hist(normal.Amount, bins = bins)
ax2.set_title('Normal')
plt.xlabel('Amount ($)')
plt.ylabel('Number of Transactions')
plt.xlim((0, 20000))
plt.yscale('log')
data1 = data.sample(frac=0.1, random_state=1)
Fraud = data1[data1['Class'] == 1]
Valid = data1[data1['Class'] == 0]
state = np.random.RandomState(42)
outlier_fraction = len(Fraud) / float(len(Valid))
state = np.random.RandomState(42)
import seaborn as sns
corrmat = data1.corr()
top_corr_features = corrmat.index
plt.figure(figsize=(20, 20))
g = sns.heatmap(data[top_corr_features].corr(), annot=True, cmap='RdYlGn') | code |
49130544/cell_27 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import IsolationForest
from sklearn.metrics import classification_report,accuracy_score
from sklearn.neighbors import LocalOutlierFactor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import seaborn as sns
data = pd.read_csv('../input/creditcard/creditcard.csv')
fraud = data[data['Class'] == 1]
normal = data[data['Class'] == 0]
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
f.suptitle('Amount per transaction by class')
bins = 50
ax1.hist(fraud.Amount, bins = bins)
ax1.set_title('Fraud')
ax2.hist(normal.Amount, bins = bins)
ax2.set_title('Normal')
plt.xlabel('Amount ($)')
plt.ylabel('Number of Transactions')
plt.xlim((0, 20000))
plt.yscale('log')
data1 = data.sample(frac=0.1, random_state=1)
Fraud = data1[data1['Class'] == 1]
Valid = data1[data1['Class'] == 0]
state = np.random.RandomState(42)
outlier_fraction = len(Fraud) / float(len(Valid))
state = np.random.RandomState(42)
## Correlation
import seaborn as sns
# get correlations of each feature in the dataset
corrmat = data1.corr()
top_corr_features = corrmat.index
plt.figure(figsize=(20, 20))
# plot heat map
g = sns.heatmap(data[top_corr_features].corr(), annot=True, cmap="RdYlGn")
columns = data1.columns.tolist()
columns = [c for c in columns if c not in ['Class']]
target = 'Class'
state = np.random.RandomState(42)
X = data1[columns]
Y = data1[target]
X_outliers = state.uniform(low=0, high=1, size=(X.shape[0], X.shape[1]))
classifiers = {'Isolation Forest': IsolationForest(n_estimators=100, max_samples=len(X), contamination=outlier_fraction, random_state=state, verbose=0), 'Local Outlier Factor': LocalOutlierFactor(n_neighbors=20, algorithm='auto', leaf_size=30, metric='minkowski', p=2, metric_params=None, contamination=outlier_fraction)}
n_outliers = len(Fraud)
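# both detectors return +1 for inliers and -1 for outliers, so predictions are remapped to the 0/1 class labels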
for i, (clf_name, clf) in enumerate(classifiers.items()):
if clf_name == 'Local Outlier Factor':
y_pred = clf.fit_predict(X)
scores_prediction = clf.negative_outlier_factor_
else:
clf.fit(X)
scores_prediction = clf.decision_function(X)
y_pred = clf.predict(X)
y_pred[y_pred == 1] = 0
y_pred[y_pred == -1] = 1
n_errors = (y_pred != Y).sum()
print('{}: {}'.format(clf_name, n_errors))
print('Accuracy Score :')
print(accuracy_score(Y, y_pred))
print('Classification Report :')
print(classification_report(Y, y_pred)) | code |
49130544/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/creditcard/creditcard.csv')
fraud = data[data['Class'] == 1]
normal = data[data['Class'] == 0]
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
f.suptitle('Amount per transaction by class')
bins = 50
ax1.hist(fraud.Amount, bins = bins)
ax1.set_title('Fraud')
ax2.hist(normal.Amount, bins = bins)
ax2.set_title('Normal')
plt.xlabel('Amount ($)')
plt.ylabel('Number of Transactions')
plt.xlim((0, 20000))
plt.yscale('log')
plt.boxplot(data.Amount) | code |
2017954/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test_ID = test.PassengerId
train = train.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test = test.drop(['Name', 'Ticket'], axis=1)
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train.dtypes
train['Age'].head() | code |
2017954/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
pd.isnull(train).sum()
pd.isnull(test).sum()
test_ID = test.PassengerId
train = train.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test = test.drop(['Name', 'Ticket'], axis=1)
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
pd.isnull(train).sum()
pd.isnull(test).sum() | code |
2017954/cell_25 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test_ID = test.PassengerId
train = train.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test = test.drop(['Name', 'Ticket'], axis=1)
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train.dtypes
train.head() | code |
2017954/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
pd.isnull(train).sum() | code |
2017954/cell_30 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test_ID = test.PassengerId
train = train.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test = test.drop(['Name', 'Ticket'], axis=1)
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train.dtypes
test.dtypes
from sklearn.linear_model import LogisticRegression
glm = LogisticRegression()
X_train = train.drop('Survived', axis=1)
Y_train = train['Survived']
X_test = test.drop('PassengerId', axis=1).copy()
glm.fit(X_train, Y_train) | code |
2017954/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test_ID = test.PassengerId
train = train.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test = test.drop(['Name', 'Ticket'], axis=1)
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
test.dtypes
test.head() | code |
2017954/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head() | code |
2017954/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test_ID = test.PassengerId
train = train.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test = test.drop(['Name', 'Ticket'], axis=1)
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
test.dtypes
test.Age.describe() | code |
2017954/cell_32 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LogisticRegression
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
pd.isnull(train).sum()
pd.isnull(test).sum()
test_ID = test.PassengerId
train = train.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test = test.drop(['Name', 'Ticket'], axis=1)
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
pd.isnull(train).sum()
pd.isnull(test).sum()
train.dtypes
test.dtypes
from sklearn.linear_model import LogisticRegression
glm = LogisticRegression()
X_train = train.drop('Survived', axis=1)
Y_train = train['Survived']
X_test = test.drop('PassengerId', axis=1).copy()
glm.fit(X_train, Y_train)
predicted = glm.predict(X_test)
submission = pd.DataFrame({'PassengerId': test_ID, 'Survived': predicted})
submission.head() | code |
2017954/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test_ID = test.PassengerId
train = train.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test = test.drop(['Name', 'Ticket'], axis=1)
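# Impute missing ages with the mean age, computed separately for train and test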
age_mean_train = train['Age'].mean()
train['Age'] = train['Age'].fillna(age_mean_train)
age_mean_test = test['Age'].mean()
test['Age'] = test['Age'].fillna(age_mean_test)
print('From train data')
southampton = train[train['Embarked'] == 'S'].shape[0]
cherbourg = train[train['Embarked'] == 'C'].shape[0]
queenstown = train[train['Embarked'] == 'Q'].shape[0]
print('No. of people from Southampton (S) = ', southampton)
print('No. of people from Cherbourg (C) = ', cherbourg)
print('No. of people from Queenstown (Q) = ', queenstown)
print('\nFrom test data')
southampton = test[test['Embarked'] == 'S'].shape[0]
cherbourg = test[test['Embarked'] == 'C'].shape[0]
queenstown = test[test['Embarked'] == 'Q'].shape[0]
print('No. of people from Southampton (S) = ', southampton)
print('No. of people from Cherbourg (C) = ', cherbourg)
print('No. of people from Queenstown (Q) = ', queenstown) | code |
2017954/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test_ID = test.PassengerId
train = train.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test = test.drop(['Name', 'Ticket'], axis=1)
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
test.dtypes | code |
2017954/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test_ID = test.PassengerId
train = train.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test = test.drop(['Name', 'Ticket'], axis=1)
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train.dtypes
train.Age.describe() | code |
2017954/cell_31 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LogisticRegression
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test_ID = test.PassengerId
train = train.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test = test.drop(['Name', 'Ticket'], axis=1)
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train.dtypes
test.dtypes
from sklearn.linear_model import LogisticRegression
glm = LogisticRegression()
X_train = train.drop('Survived', axis=1)
Y_train = train['Survived']
X_test = test.drop('PassengerId', axis=1).copy()
glm.fit(X_train, Y_train)
predicted = glm.predict(X_test)
print('Accuracy = %.2f' % round(glm.score(X_train, Y_train) * 100, 2)) | code |
2017954/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test_ID = test.PassengerId
train = train.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test = test.drop(['Name', 'Ticket'], axis=1)
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train.dtypes | code |
2017954/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test_ID = test.PassengerId
train = train.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test = test.drop(['Name', 'Ticket'], axis=1)
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
test.dtypes
test['Age'].head() | code |
2017954/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
pd.isnull(train).sum()
pd.isnull(test).sum()
test_ID = test.PassengerId
train = train.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test = test.drop(['Name', 'Ticket'], axis=1)
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
pd.isnull(train).sum() | code |
2017954/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
pd.isnull(train).sum()
pd.isnull(test).sum() | code |
90120622/cell_21 | [
"text_html_output_1.png"
] | import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
df['time'] = df['time'].map(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d %X'))
df['day'] = df['time'].map(lambda x: x.strftime('%A'))
df['timeonly'] = df['time'].map(lambda x: x.time())
df['date'] = df['time'].map(lambda x: x.date())
df['month'] = df['time'].map(lambda x: x.month)
df['xy'] = list(zip(df['x'], df['y']))  # roadway key used in the boxplots below
fig, axes = plt.subplots(nrows=2, ncols=2)
fig.set_size_inches(12, 10)
sns.boxplot(data=df, y='congestion', x='y', ax=axes[0][0])
sns.boxplot(data=df, y='congestion', x='x', orient='v', ax=axes[0][1])
sns.boxplot(data=df, y='congestion', x='direction', orient='v', ax=axes[1][0])
sns.boxplot(data=df, y='congestion', x='xy', orient='v', ax=axes[1][1])
plt.suptitle('Boxplots of congestion over spatial features', fontsize=16)
plt.show()
daysofweek = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.set_size_inches(15, 5)
sns.boxplot(data=df, y='congestion', x='day', ax=axes[0])
axes[1].plot(df.groupby(['day']).congestion.mean().reindex(daysofweek))
plt.show()
dd = df.groupby('timeonly').congestion.mean().reset_index()
plt.figure(figsize=(18, 5))
plt.plot(dd['timeonly'].map(lambda x: str(x)), dd['congestion'])
plt.xticks(rotation=45)
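# Mark the daily minimum (red), daily maximum (green), and the pre-10:20 morning peak (blue)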
plt.axvline(x=str(dd.loc[np.argmin(dd['congestion']), 'timeonly']), c='red')
plt.axvline(x=str(dd.loc[np.argmax(dd['congestion']), 'timeonly']), c='green')
plt.axvline(x=str(dd.loc[np.argmax(dd.loc[dd['timeonly'] < datetime.time(10, 20, 0), 'congestion']), 'timeonly']), c='blue')
plt.title('Average congestion vs time of day', fontsize=20)
plt.xlabel('Time of day', fontsize=16)
plt.ylabel('Mean congestion', fontsize=16)
plt.show() | code |
90120622/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
df.head() | code |
90120622/cell_4 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
df.head() | code |
90120622/cell_23 | [
"image_output_1.png"
] | import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
df['time'] = df['time'].map(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d %X'))
df['day'] = df['time'].map(lambda x: x.strftime('%A'))
df['timeonly'] = df['time'].map(lambda x: x.time())
df['date'] = df['time'].map(lambda x: x.date())
df['month'] = df['time'].map(lambda x: x.month)
df['xy'] = list(zip(df['x'], df['y']))  # roadway key used in the boxplots below
fig, axes = plt.subplots(nrows=2, ncols=2)
fig.set_size_inches(12, 10)
sns.boxplot(data=df, y='congestion', x='y', ax=axes[0][0])
sns.boxplot(data=df, y='congestion', x='x', orient='v', ax=axes[0][1])
sns.boxplot(data=df, y='congestion', x='direction', orient='v', ax=axes[1][0])
sns.boxplot(data=df, y='congestion', x='xy', orient='v', ax=axes[1][1])
plt.suptitle('Boxplots of congestion over spatial features', fontsize=16)
plt.show()
daysofweek = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.set_size_inches(15, 5)
sns.boxplot(data=df, y='congestion', x='day', ax=axes[0])
axes[1].plot(df.groupby(['day']).congestion.mean().reindex(daysofweek))
plt.show()
dd = df.groupby('timeonly').congestion.mean().reset_index()
plt.xticks(rotation=45)
plt.figure(figsize=(20, 10))
for day in daysofweek:
    dd = df[df['day'] == day].groupby('timeonly').congestion.mean().reset_index()
    plt.plot(dd['timeonly'].map(str), dd['congestion'], label=day)
plt.xticks(rotation=45)
plt.title('Average congestion over the day for different days of the week', fontsize=20)
plt.xlabel('Time of day', fontsize=16)
plt.ylabel('Mean congestion', fontsize=16)
plt.legend()
plt.show() | code |
90120622/cell_6 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
print(df.x.unique(), df.y.unique(), df.direction.unique()) | code |
90120622/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
df['xy'] = list(zip(df['x'], df['y']))  # roadway key used in the boxplots below
df['day'] = pd.to_datetime(df['time']).map(lambda x: x.strftime('%A'))  # weekday name used below
fig, axes = plt.subplots(nrows=2, ncols=2)
fig.set_size_inches(12, 10)
sns.boxplot(data=df, y='congestion', x='y', ax=axes[0][0])
sns.boxplot(data=df, y='congestion', x='x', orient='v', ax=axes[0][1])
sns.boxplot(data=df, y='congestion', x='direction', orient='v', ax=axes[1][0])
sns.boxplot(data=df, y='congestion', x='xy', orient='v', ax=axes[1][1])
plt.suptitle('Boxplots of congestion over spatial features', fontsize=16)
plt.show()
daysofweek = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.set_size_inches(15, 5)
sns.boxplot(data=df, y='congestion', x='day', ax=axes[0])
axes[1].plot(df.groupby(['day']).congestion.mean().reindex(daysofweek))
plt.show() | code |
90120622/cell_8 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
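# Encode each roadway as a tuple key: (x, y) for the location, (x, y, direction) for a specific carriageway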
df['xy'] = list(zip(df['x'], df['y']))
df['xydir'] = list(zip(df['x'], df['y'], df['direction']))
df['xy'].unique() | code |
90120622/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
df['xy'] = list(zip(df['x'], df['y']))  # roadway key used in the 'xy' boxplot below
fig, axes = plt.subplots(nrows=2, ncols=2)
fig.set_size_inches(12, 10)
sns.boxplot(data=df, y='congestion', x='y', ax=axes[0][0])
sns.boxplot(data=df, y='congestion', x='x', orient='v', ax=axes[0][1])
sns.boxplot(data=df, y='congestion', x='direction', orient='v', ax=axes[1][0])
sns.boxplot(data=df, y='congestion', x='xy', orient='v', ax=axes[1][1])
plt.suptitle('Boxplots of congestion over spatial features', fontsize=16)
plt.show() | code |
90120622/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
df['xy'] = list(zip(df['x'], df['y']))  # roadway key; directions are listed per location below
for i in df['xy'].unique():
print(i, ':', df.loc[df['xy'] == i, 'direction'].unique()) | code |
334788/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
dficd = pd.read_csv('../input/Icd10Code.csv') | code |
104117596/cell_9 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
model.fit(X_train, y_train)
model.score(X_test, y_test)
arr = model.predict(X_test)
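# Share of test samples predicted as class 1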
arr2 = []
for x in arr:
if x == 1:
arr2.append(x)
print(len(arr2) / len(arr)) | code |
104117596/cell_2 | [
"text_plain_output_1.png"
] | import os
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
104117596/cell_7 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
model.fit(X_train, y_train) | code |
104117596/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
model.fit(X_train, y_train)
model.score(X_test, y_test) | code |
73086698/cell_6 | [
"text_html_output_1.png"
] | from surprise import Dataset,Reader,SVD
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from surprise import Dataset, Reader, SVD
reader = Reader()
ratings = pd.read_csv('../input/the-movies-dataset/ratings_small.csv')
rows = ratings.userId.unique()
columns = ratings.movieId.unique()
columns
rows
ratings | code |
73086698/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73086698/cell_7 | [
"text_html_output_1.png"
] | from surprise import Dataset,Reader,SVD
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from surprise import Dataset, Reader, SVD
reader = Reader()
ratings = pd.read_csv('../input/the-movies-dataset/ratings_small.csv')
rows = ratings.userId.unique()
columns = ratings.movieId.unique()
columns
rows
myData = np.array([0.0 for i in range(671 * 9066)])
mydf = pd.DataFrame(myData.reshape(671, -1))
mydf.columns = columns
mydf.index = rows
mydf
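# Fill the dense 671 x 9066 user-item matrix from the ratings table; unrated pairs stay 0.0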
for i in range(100004):
mydf.loc[ratings.loc[i, 'userId'], ratings.loc[i, 'movieId']] = ratings.loc[i, 'rating']
mydf | code |
73086698/cell_8 | [
"text_html_output_1.png"
] | from surprise import Dataset,Reader,SVD
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from surprise import Dataset, Reader, SVD
reader = Reader()
ratings = pd.read_csv('../input/the-movies-dataset/ratings_small.csv')
rows = ratings.userId.unique()
columns = ratings.movieId.unique()
columns
rows
myData = np.array([0.0 for i in range(671 * 9066)])
mydf = pd.DataFrame(myData.reshape(671, -1))
mydf.columns = columns
mydf.index = rows
mydf
for i in range(100004):
mydf.loc[ratings.loc[i, 'userId'], ratings.loc[i, 'movieId']] = ratings.loc[i, 'rating']
mydf
mydf | code |
73086698/cell_3 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from surprise import Dataset,Reader,SVD
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from surprise import Dataset, Reader, SVD
reader = Reader()
ratings = pd.read_csv('../input/the-movies-dataset/ratings_small.csv')
rows = ratings.userId.unique()
columns = ratings.movieId.unique()
print(len(rows))
print(len(columns))
columns
rows | code |
73086698/cell_5 | [
"text_html_output_1.png"
] | from surprise import Dataset,Reader,SVD
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from surprise import Dataset, Reader, SVD
reader = Reader()
ratings = pd.read_csv('../input/the-movies-dataset/ratings_small.csv')
rows = ratings.userId.unique()
columns = ratings.movieId.unique()
columns
rows
myData = np.array([0.0 for i in range(671 * 9066)])
mydf = pd.DataFrame(myData.reshape(671, -1))
mydf.columns = columns
mydf.index = rows
mydf | code |
122262131/cell_42 | [
"text_html_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1['Order Date'] = pd.to_datetime(df1['Order Date'])
df1['Ship Date'] = pd.to_datetime(df1['Ship Date'])
df1.nunique()
df1.isna().sum()
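# Derive per-unit economics: unit profit, unit price, and the implied unit cost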
df1['Profit/Unit'] = df1['Profit'] / df1['Quantity']
df1['Price'] = df1['Sales'] / df1['Quantity']
df1['Cost Per Unit'] = df1['Price'] - df1['Profit/Unit']
df1.sample(3)
df1['Month'] = df1['Order Date'].dt.month
df1.sample(3)
b = df1.groupby(['Month', 'State'])['Sales'].sum()
v = df1.groupby(['Month', 'State'])['Profit'].sum()
v1 = pd.merge(b, v, how='left', on=['Month', 'State'])
v1 = pd.DataFrame(v1)
v1.reset_index(inplace=True)
v1
h = df1['State'].value_counts()
h
u = df1.groupby('Category')['Profit'].sum()
u
l = df1.groupby('Category')['Sales'].sum()
l
h = df1.groupby(['State', 'City'])['Profit'].sum()
h | code |
122262131/cell_4 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import cufflinks as cs
import plotly.express as px
import plotly as py
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot | code |
122262131/cell_34 | [
"text_plain_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1['Order Date'] = pd.to_datetime(df1['Order Date'])
df1['Ship Date'] = pd.to_datetime(df1['Ship Date'])
df1.nunique()
df1.isna().sum()
df1['Profit/Unit'] = df1['Profit'] / df1['Quantity']
df1['Price'] = df1['Sales'] / df1['Quantity']
df1['Cost Per Unit'] = df1['Price'] - df1['Profit/Unit']
df1.sample(3)
df1['Month'] = df1['Order Date'].dt.month
df1.sample(3)
b = df1.groupby(['Month', 'State'])['Sales'].sum()
v = df1.groupby(['Month', 'State'])['Profit'].sum()
v1 = pd.merge(b, v, how='left', on=['Month', 'State'])
v1 = pd.DataFrame(v1)
v1.reset_index(inplace=True)
v1
u = df1.groupby('Category')['Profit'].sum()
u | code |
122262131/cell_33 | [
"text_html_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1['Order Date'] = pd.to_datetime(df1['Order Date'])
df1['Ship Date'] = pd.to_datetime(df1['Ship Date'])
df1.nunique()
df1.isna().sum()
df1['Profit/Unit'] = df1['Profit'] / df1['Quantity']
df1['Price'] = df1['Sales'] / df1['Quantity']
df1['Cost Per Unit'] = df1['Price'] - df1['Profit/Unit']
df1.sample(3)
df1['Month'] = df1['Order Date'].dt.month
df1.sample(3)
b = df1.groupby(['Month', 'State'])['Sales'].sum()
v = df1.groupby(['Month', 'State'])['Profit'].sum()
v1 = pd.merge(b, v, how='left', on=['Month', 'State'])
v1 = pd.DataFrame(v1)
v1.reset_index(inplace=True)
v1
h = df1['State'].value_counts()
h | code |
122262131/cell_44 | [
"text_plain_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1['Order Date'] = pd.to_datetime(df1['Order Date'])
df1['Ship Date'] = pd.to_datetime(df1['Ship Date'])
df1.nunique()
df1.isna().sum()
df1['Profit/Unit'] = df1['Profit'] / df1['Quantity']
df1['Price'] = df1['Sales'] / df1['Quantity']
df1['Cost Per Unit'] = df1['Price'] - df1['Profit/Unit']
df1.sample(3)
df1['Month'] = df1['Order Date'].dt.month
df1.sample(3)
b = df1.groupby(['Month', 'State'])['Sales'].sum()
v = df1.groupby(['Month', 'State'])['Profit'].sum()
v1 = pd.merge(b, v, how='left', on=['Month', 'State'])
v1 = pd.DataFrame(v1)
v1.reset_index(inplace=True)
v1
h = df1['State'].value_counts()
h
u = df1.groupby('Category')['Profit'].sum()
u
l = df1.groupby('Category')['Sales'].sum()
l
h = df1.groupby(['State', 'City'])['Profit'].sum()
h
r = df1.groupby(['State', 'City'])['Sales'].sum()
r
t = df1.groupby(['State', 'City'])['Quantity'].mean()
t | code |
122262131/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1.nunique()
df1.isna().sum()
df1['Profit/Unit'] = df1['Profit'] / df1['Quantity']
df1['Price'] = df1['Sales'] / df1['Quantity']
df1['Cost Per Unit'] = df1['Price'] - df1['Profit/Unit']
df1.sample(3) | code |
122262131/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.head() | code |
122262131/cell_40 | [
"text_plain_output_1.png"
] | import pandas as pd
import plotly.express as px
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1['Order Date'] = pd.to_datetime(df1['Order Date'])
df1['Ship Date'] = pd.to_datetime(df1['Ship Date'])
df1.nunique()
df1.isna().sum()
df1['Profit/Unit'] = df1['Profit'] / df1['Quantity']
df1['Price'] = df1['Sales'] / df1['Quantity']
df1['Cost Per Unit'] = df1['Price'] - df1['Profit/Unit']
df1.sample(3)
df1['Month'] = df1['Order Date'].dt.month
df1.sample(3)
b = df1.groupby(['Month', 'State'])['Sales'].sum()
v = df1.groupby(['Month', 'State'])['Profit'].sum()
v1 = pd.merge(b, v, how='left', on=['Month', 'State'])
v1 = pd.DataFrame(v1)
v1.reset_index(inplace=True)
v1
fig = px.line(v1, 'Month', 'Profit', color='State', hover_name='State', title='Profit Over Months')
fig = px.line(v1, 'Month', 'Sales', color='State', hover_name='State', title='Sales Over Months')
u = df1.groupby('Category')['Profit'].sum()
u
l = df1.groupby('Category')['Sales'].sum()
l
fig = px.bar(u, u.index, u.values, color=u.index, title='Profit By Category', hover_name=u.values)
fig.show()
fig = px.bar(l, l.index, l.values, color=l.index, title='Sales From Each Category', hover_name=l.index)
fig.show() | code |
122262131/cell_29 | [
"text_html_output_1.png"
] | import pandas as pd
import plotly.express as px
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1['Order Date'] = pd.to_datetime(df1['Order Date'])
df1['Ship Date'] = pd.to_datetime(df1['Ship Date'])
df1.nunique()
df1.isna().sum()
df1['Profit/Unit'] = df1['Profit'] / df1['Quantity']
df1['Price'] = df1['Sales'] / df1['Quantity']
df1['Cost Per Unit'] = df1['Price'] - df1['Profit/Unit']
df1.sample(3)
df1['Month'] = df1['Order Date'].dt.month
df1.sample(3)
b = df1.groupby(['Month', 'State'])['Sales'].sum()
v = df1.groupby(['Month', 'State'])['Profit'].sum()
v1 = pd.merge(b, v, how='left', on=['Month', 'State'])
v1 = pd.DataFrame(v1)
v1.reset_index(inplace=True)
v1
fig = px.line(v1, 'Month', 'Profit', color='State', hover_name='State', title='Profit Over Months')
fig = px.line(v1, 'Month', 'Sales', color='State', hover_name='State', title='Sales Over Months')
fig.show() | code |
122262131/cell_26 | [
"text_html_output_1.png"
] | import pandas as pd
import plotly.express as px
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1['Order Date'] = pd.to_datetime(df1['Order Date'])
df1['Ship Date'] = pd.to_datetime(df1['Ship Date'])
df1.nunique()
df1.isna().sum()
df1['Profit/Unit'] = df1['Profit'] / df1['Quantity']
df1['Price'] = df1['Sales'] / df1['Quantity']
df1['Cost Per Unit'] = df1['Price'] - df1['Profit/Unit']
df1.sample(3)
df1['Month'] = df1['Order Date'].dt.month
df1.sample(3)
b = df1.groupby(['Month', 'State'])['Sales'].sum()
v = df1.groupby(['Month', 'State'])['Profit'].sum()
v1 = pd.merge(b, v, how='left', on=['Month', 'State'])
v1 = pd.DataFrame(v1)
v1.reset_index(inplace=True)
v1
fig = px.line(v1, 'Month', 'Profit', color='State', hover_name='State', title='Profit Over Months')
fig.show() | code |
122262131/cell_45 | [
"text_plain_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1['Order Date'] = pd.to_datetime(df1['Order Date'])
df1['Ship Date'] = pd.to_datetime(df1['Ship Date'])
df1.nunique()
df1.isna().sum()
df1['Profit/Unit'] = df1['Profit'] / df1['Quantity']
df1['Price'] = df1['Sales'] / df1['Quantity']
df1['Cost Per Unit'] = df1['Price'] - df1['Profit/Unit']
df1.sample(3)
df1['Month'] = df1['Order Date'].dt.month
df1.sample(3)
b = df1.groupby(['Month', 'State'])['Sales'].sum()
v = df1.groupby(['Month', 'State'])['Profit'].sum()
v1 = pd.merge(b, v, how='left', on=['Month', 'State'])
v1 = pd.DataFrame(v1)
v1.reset_index(inplace=True)
v1
h = df1['State'].value_counts()
h
u = df1.groupby('Category')['Profit'].sum()
u
l = df1.groupby('Category')['Sales'].sum()
l
h = df1.groupby(['State', 'City'])['Profit'].sum()
h
r = df1.groupby(['State', 'City'])['Sales'].sum()
r
t = df1.groupby(['State', 'City'])['Quantity'].mean()
t
x = df1.groupby(['State', 'City'])['Sales'].cumsum()
x | code |
122262131/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1.nunique()
df1.isna().sum() | code |
122262131/cell_32 | [
"text_html_output_2.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1['Order Date'] = pd.to_datetime(df1['Order Date'])
df1['Ship Date'] = pd.to_datetime(df1['Ship Date'])
df1.nunique()
df1.isna().sum()
df1['Profit/Unit'] = df1['Profit'] / df1['Quantity']
df1['Price'] = df1['Sales'] / df1['Quantity']
df1['Cost Per Unit'] = df1['Price'] - df1['Profit/Unit']
df1.sample(3)
df1['Month'] = df1['Order Date'].dt.month
df1.sample(3)
b = df1.groupby(['Month', 'State'])['Sales'].sum()
v = df1.groupby(['Month', 'State'])['Profit'].sum()
v1 = pd.merge(b, v, how='left', on=['Month', 'State'])
v1 = pd.DataFrame(v1)
v1.reset_index(inplace=True)
v1
m = df1['City'].value_counts()
m | code |
122262131/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape | code |
122262131/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1.nunique() | code |
122262131/cell_35 | [
"text_plain_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1['Order Date'] = pd.to_datetime(df1['Order Date'])
df1['Ship Date'] = pd.to_datetime(df1['Ship Date'])
df1.nunique()
df1.isna().sum()
df1['Profit/Unit'] = df1['Profit'] / df1['Quantity']
df1['Price'] = df1['Sales'] / df1['Quantity']
df1['Cost Per Unit'] = df1['Price'] - df1['Profit/Unit']
df1.sample(3)
df1['Month'] = df1['Order Date'].dt.month
df1.sample(3)
b = df1.groupby(['Month', 'State'])['Sales'].sum()
v = df1.groupby(['Month', 'State'])['Profit'].sum()
v1 = pd.merge(b, v, how='left', on=['Month', 'State'])
v1 = pd.DataFrame(v1)
v1.reset_index(inplace=True)
v1
u = df1.groupby('Category')['Profit'].sum()
u
l = df1.groupby('Category')['Sales'].sum()
l | code |
122262131/cell_43 | [
"text_html_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1['Order Date'] = pd.to_datetime(df1['Order Date'])
df1['Ship Date'] = pd.to_datetime(df1['Ship Date'])
df1.nunique()
df1.isna().sum()
df1['Profit/Unit'] = df1['Profit'] / df1['Quantity']
df1['Price'] = df1['Sales'] / df1['Quantity']
df1['Cost Per Unit'] = df1['Price'] - df1['Profit/Unit']
df1.sample(3)
df1['Month'] = df1['Order Date'].dt.month
df1.sample(3)
b = df1.groupby(['Month', 'State'])['Sales'].sum()
v = df1.groupby(['Month', 'State'])['Profit'].sum()
v1 = pd.merge(b, v, how='left', on=['Month', 'State'])
v1 = pd.DataFrame(v1)
v1.reset_index(inplace=True)
v1
h = df1['State'].value_counts()
h
u = df1.groupby('Category')['Profit'].sum()
u
l = df1.groupby('Category')['Sales'].sum()
l
h = df1.groupby(['State', 'City'])['Profit'].sum()
h
r = df1.groupby(['State', 'City'])['Sales'].sum()
r | code |
122262131/cell_31 | [
"text_html_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1['Order Date'] = pd.to_datetime(df1['Order Date'])
df1['Ship Date'] = pd.to_datetime(df1['Ship Date'])
df1.nunique()
df1.isna().sum()
df1['Profit/Unit'] = df1['Profit'] / df1['Quantity']
df1['Price'] = df1['Sales'] / df1['Quantity']
df1['Cost Per Unit'] = df1['Price'] - df1['Profit/Unit']
df1.sample(3)
df1['Month'] = df1['Order Date'].dt.month
df1.sample(3)
b = df1.groupby(['Month', 'State'])['Sales'].sum()
v = df1.groupby(['Month', 'State'])['Profit'].sum()
v1 = pd.merge(b, v, how='left', on=['Month', 'State'])
v1 = pd.DataFrame(v1)
v1.reset_index(inplace=True)
v1
g = df1['Category'].value_counts()
g | code |
122262131/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1['Order Date'] = pd.to_datetime(df1['Order Date'])
df1['Ship Date'] = pd.to_datetime(df1['Ship Date'])
df1.nunique()
df1.isna().sum()
df1['Profit/Unit'] = df1['Profit'] / df1['Quantity']
df1['Price'] = df1['Sales'] / df1['Quantity']
df1['Cost Per Unit'] = df1['Price'] - df1['Profit/Unit']
df1.sample(3)
df1['Month'] = df1['Order Date'].dt.month
df1.sample(3)
b = df1.groupby(['Month', 'State'])['Sales'].sum()
v = df1.groupby(['Month', 'State'])['Profit'].sum()
v1 = pd.merge(b, v, how='left', on=['Month', 'State'])
v1 = pd.DataFrame(v1)
v1.reset_index(inplace=True)
v1 | code |
122262131/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1.nunique()
df1.isna().sum()
df1['Profit/Unit'] = df1['Profit'] / df1['Quantity']
df1['Price'] = df1['Sales'] / df1['Quantity']
df1['Cost Per Unit'] = df1['Price'] - df1['Profit/Unit']
df1.sample(3)
df1['Order Date'] = pd.to_datetime(df1['Order Date'])  # convert before using the .dt accessor
df1['Month'] = df1['Order Date'].dt.month
df1.sample(3) | code |
122262131/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.info() | code |
122262131/cell_37 | [
"text_plain_output_1.png"
] | import pandas as pd
import plotly.express as px
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns
df1['Order Date'] = pd.to_datetime(df1['Order Date'])
df1['Ship Date'] = pd.to_datetime(df1['Ship Date'])
df1.nunique()
df1.isna().sum()
df1['Profit/Unit'] = df1['Profit'] / df1['Quantity']
df1['Price'] = df1['Sales'] / df1['Quantity']
df1['Cost Per Unit'] = df1['Price'] - df1['Profit/Unit']
df1.sample(3)
df1['Month'] = df1['Order Date'].dt.month
df1.sample(3)
b = df1.groupby(['Month', 'State'])['Sales'].sum()
v = df1.groupby(['Month', 'State'])['Profit'].sum()
v1 = pd.merge(b, v, how='left', on=['Month', 'State'])
v1 = pd.DataFrame(v1)
v1.reset_index(inplace=True)
v1
fig = px.line(v1, 'Month', 'Profit', color='State', hover_name='State', title='Profit Over Months')
fig = px.line(v1, 'Month', 'Sales', color='State', hover_name='State', title='Sales Over Months')
u = df1.groupby('Category')['Profit'].sum()
u
fig = px.bar(u, u.index, u.values, color=u.index, title='Profit By Category', hover_name=u.values)
fig.show() | code |
122262131/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
df1 = pd.read_csv('/kaggle/input/walmart-sales-analysis/Walmart.csv', index_col='Order ID')
df1.shape
df1.columns | code |
130022960/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
asus = pd.DataFrame(pd.read_csv('/kaggle/input/advertising-sales-dataset/Advertising Budget and Sales.csv'))
asus.shape
asus.drop(columns=['Unnamed: 0'])
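# Trim outliers: keep only rows below the 98th percentile of newspaper budget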
b = asus['Newspaper Ad Budget ($)'].quantile(0.98)
asus_new = asus[asus['Newspaper Ad Budget ($)'] < b]
asus_new.isnull().sum() * 100 / asus_new.shape[0]
asus_new.duplicated().sum()
asus_new.drop(columns=['Unnamed: 0'])
sns.pairplot(asus_new, x_vars=['TV Ad Budget ($)', 'Radio Ad Budget ($)', 'Newspaper Ad Budget ($)'], y_vars='Sales ($)', height=5, aspect=0.5, kind='scatter')
plt.show() | code |
130022960/cell_9 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
asus = pd.DataFrame(pd.read_csv('/kaggle/input/advertising-sales-dataset/Advertising Budget and Sales.csv'))
asus.shape
asus.drop(columns=['Unnamed: 0'])
plt.figure(figsize=(6, 3))
sns.boxplot(asus['TV Ad Budget ($)'])
plt.show() | code |
130022960/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
asus = pd.DataFrame(pd.read_csv('/kaggle/input/advertising-sales-dataset/Advertising Budget and Sales.csv'))
asus.shape | code |
130022960/cell_20 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
asus = pd.DataFrame(pd.read_csv('/kaggle/input/advertising-sales-dataset/Advertising Budget and Sales.csv'))
asus.shape
asus.drop(columns=['Unnamed: 0'])
b = asus['Newspaper Ad Budget ($)'].quantile(0.98)
asus_new = asus[asus['Newspaper Ad Budget ($)'] < b]
asus_new.isnull().sum() * 100 / asus_new.shape[0]
asus_new.duplicated().sum()
asus_new.drop(columns=['Unnamed: 0'])
sns.heatmap(asus_new.corr(), cmap='YlGnBu', annot=True)
plt.show() | code |
130022960/cell_6 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
asus = pd.DataFrame(pd.read_csv('/kaggle/input/advertising-sales-dataset/Advertising Budget and Sales.csv'))
asus.shape
asus.describe() | code |
130022960/cell_2 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns | code |
130022960/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
asus = pd.DataFrame(pd.read_csv('/kaggle/input/advertising-sales-dataset/Advertising Budget and Sales.csv'))
asus.shape
asus.drop(columns=['Unnamed: 0'])
plt.figure(figsize=(6, 3))
sns.boxplot(asus['Newspaper Ad Budget ($)'])
plt.show() | code |
130022960/cell_19 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
asus = pd.DataFrame(pd.read_csv('/kaggle/input/advertising-sales-dataset/Advertising Budget and Sales.csv'))
asus.shape
asus.drop(columns=['Unnamed: 0'])
b = asus['Newspaper Ad Budget ($)'].quantile(0.98)
asus_new = asus[asus['Newspaper Ad Budget ($)'] < b]
asus_new.isnull().sum() * 100 / asus_new.shape[0]
asus_new.duplicated().sum()
asus_new.drop(columns=['Unnamed: 0']) | code |
130022960/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
asus = pd.DataFrame(pd.read_csv('/kaggle/input/advertising-sales-dataset/Advertising Budget and Sales.csv'))
asus.shape
asus.drop(columns=['Unnamed: 0']) | code |
130022960/cell_18 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
asus = pd.DataFrame(pd.read_csv('/kaggle/input/advertising-sales-dataset/Advertising Budget and Sales.csv'))
asus.shape
asus.drop(columns=['Unnamed: 0'])
b = asus['Newspaper Ad Budget ($)'].quantile(0.98)
asus_new = asus[asus['Newspaper Ad Budget ($)'] < b]
asus_new.isnull().sum() * 100 / asus_new.shape[0]
asus_new.duplicated().sum() | code |
130022960/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
asus = pd.DataFrame(pd.read_csv('/kaggle/input/advertising-sales-dataset/Advertising Budget and Sales.csv'))
asus.shape
asus.drop(columns=['Unnamed: 0'])
b = asus['Newspaper Ad Budget ($)'].quantile(0.98)
asus_new = asus[asus['Newspaper Ad Budget ($)'] < b]
print('number of rows we lost', asus.shape[0] - asus_new.shape[0]) | code |
130022960/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
asus = pd.DataFrame(pd.read_csv('/kaggle/input/advertising-sales-dataset/Advertising Budget and Sales.csv'))
asus.shape
asus.drop(columns=['Unnamed: 0'])
b = asus['Newspaper Ad Budget ($)'].quantile(0.98)
asus_new = asus[asus['Newspaper Ad Budget ($)'] < b]
asus_new.info() | code |
130022960/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
asus = pd.DataFrame(pd.read_csv('/kaggle/input/advertising-sales-dataset/Advertising Budget and Sales.csv'))
asus.head(10) | code |