path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
89127402/cell_18 | [
"text_html_output_2.png"
] | from dateutil.relativedelta import relativedelta
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
import pandas as pd
from dateutil.relativedelta import relativedelta
# rolling averages and std
def rolling_stat(timeseries, window_size):
# Determining rolling statistics
rolmean = timeseries.rolling(window = window_size).mean()
rolstd = timeseries.rolling(window = window_size).std()
# Plot rolling statistics:
fig, ax = plt.subplots(figsize = (12, 4))
orig = plt.plot(timeseries, color = '#4DBEEE', label = 'Original')
std = plt.plot(rolstd, color = 'black', label = 'Rolling Std')
mean = plt.plot(rolmean, color = 'red', label = 'Rolling Mean')
plt.legend(loc = 'best')
plt.title('Rolling Mean and Standard Deviation')
plt.grid()
plt.show(block=False)
# get n predictions for series by model
def future_preds_df(model, series, num_steps):
pred_first = series.index.max() + relativedelta(weeks = 1)
pred_last = series.index.max() + relativedelta(weeks = num_steps)
date_range_index = pd.date_range(pred_first, pred_last, freq = 'W')
vals = model.predict(n_periods = num_steps)
return pd.DataFrame(vals,index = date_range_index)
# Augmented Dickey-Fuller Test
def adf_test(timeseries):
adf, pvalue, usedlag, nobs, critical_values, icbest = adfuller(timeseries)
print("Test statistic: ", adf, 2)
print("P-value: ", pvalue)
print("Critical values: ", critical_values)
# source: notebook 06f_DEMO_SARIMA_Prophet by IBM Specialized Models: Time Series and Survival Analysis course
stations = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/station_list.csv', index_col=0)
dublin_air_data = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/Stations/532_dublin_airport.csv', index_col=0, parse_dates=['date'])
dublin_air_data.info() | code |
89127402/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from dateutil.relativedelta import relativedelta
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
import pandas as pd
from dateutil.relativedelta import relativedelta
# rolling averages and std
def rolling_stat(timeseries, window_size):
# Determining rolling statistics
rolmean = timeseries.rolling(window = window_size).mean()
rolstd = timeseries.rolling(window = window_size).std()
# Plot rolling statistics:
fig, ax = plt.subplots(figsize = (12, 4))
orig = plt.plot(timeseries, color = '#4DBEEE', label = 'Original')
std = plt.plot(rolstd, color = 'black', label = 'Rolling Std')
mean = plt.plot(rolmean, color = 'red', label = 'Rolling Mean')
plt.legend(loc = 'best')
plt.title('Rolling Mean and Standard Deviation')
plt.grid()
plt.show(block=False)
# get n predictions for series by model
def future_preds_df(model, series, num_steps):
pred_first = series.index.max() + relativedelta(weeks = 1)
pred_last = series.index.max() + relativedelta(weeks = num_steps)
date_range_index = pd.date_range(pred_first, pred_last, freq = 'W')
vals = model.predict(n_periods = num_steps)
return pd.DataFrame(vals,index = date_range_index)
# Augmented Dickey-Fuller Test
def adf_test(timeseries):
adf, pvalue, usedlag, nobs, critical_values, icbest = adfuller(timeseries)
print("Test statistic: ", adf, 2)
print("P-value: ", pvalue)
print("Critical values: ", critical_values)
# source: notebook 06f_DEMO_SARIMA_Prophet by IBM Specialized Models: Time Series and Survival Analysis course
stations = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/station_list.csv', index_col=0)
stations.head(3) | code |
89127402/cell_16 | [
"text_plain_output_1.png"
] | from dateutil.relativedelta import relativedelta
from geopy.distance import distance
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
import pandas as pd
from dateutil.relativedelta import relativedelta
# rolling averages and std
def rolling_stat(timeseries, window_size):
# Determining rolling statistics
rolmean = timeseries.rolling(window = window_size).mean()
rolstd = timeseries.rolling(window = window_size).std()
# Plot rolling statistics:
fig, ax = plt.subplots(figsize = (12, 4))
orig = plt.plot(timeseries, color = '#4DBEEE', label = 'Original')
std = plt.plot(rolstd, color = 'black', label = 'Rolling Std')
mean = plt.plot(rolmean, color = 'red', label = 'Rolling Mean')
plt.legend(loc = 'best')
plt.title('Rolling Mean and Standard Deviation')
plt.grid()
plt.show(block=False)
# get n predictions for series by model
def future_preds_df(model, series, num_steps):
pred_first = series.index.max() + relativedelta(weeks = 1)
pred_last = series.index.max() + relativedelta(weeks = num_steps)
date_range_index = pd.date_range(pred_first, pred_last, freq = 'W')
vals = model.predict(n_periods = num_steps)
return pd.DataFrame(vals,index = date_range_index)
# Augmented Dickey-Fuller Test
def adf_test(timeseries):
adf, pvalue, usedlag, nobs, critical_values, icbest = adfuller(timeseries)
print("Test statistic: ", adf, 2)
print("P-value: ", pvalue)
print("Critical values: ", critical_values)
# source: notebook 06f_DEMO_SARIMA_Prophet by IBM Specialized Models: Time Series and Survival Analysis course
stations = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/station_list.csv', index_col=0)
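# Look up Dublin Airport's coordinates, then compute each station's great-circle distance to it in km with geopy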
dublin_air_loc = (float(stations[stations['Station name'] == 'dublin_airport']['latitude_dd']), float(stations[stations['Station name'] == 'dublin_airport']['longitude_dd']))
stations['dist_to_dublin_air_km'] = stations.apply(lambda row: round(distance(dublin_air_loc, (row['latitude_dd'], row['longitude_dd'])).km, 2), axis=1)
stations.sort_values(by=['dist_to_dublin_air_km']).head(10) | code |
89127402/cell_24 | [
"text_plain_output_1.png"
] | from dateutil.relativedelta import relativedelta
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
import pandas as pd
from dateutil.relativedelta import relativedelta
# rolling averages and std
def rolling_stat(timeseries, window_size):
# Determining rolling statistics
rolmean = timeseries.rolling(window = window_size).mean()
rolstd = timeseries.rolling(window = window_size).std()
# Plot rolling statistics:
fig, ax = plt.subplots(figsize = (12, 4))
orig = plt.plot(timeseries, color = '#4DBEEE', label = 'Original')
std = plt.plot(rolstd, color = 'black', label = 'Rolling Std')
mean = plt.plot(rolmean, color = 'red', label = 'Rolling Mean')
plt.legend(loc = 'best')
plt.title('Rolling Mean and Standard Deviation')
plt.grid()
plt.show(block=False)
# get n predictions for series by model
def future_preds_df(model, series, num_steps):
pred_first = series.index.max() + relativedelta(weeks = 1)
pred_last = series.index.max() + relativedelta(weeks = num_steps)
date_range_index = pd.date_range(pred_first, pred_last, freq = 'W')
vals = model.predict(n_periods = num_steps)
return pd.DataFrame(vals,index = date_range_index)
# Augmented Dickey-Fuller Test
def adf_test(timeseries):
adf, pvalue, usedlag, nobs, critical_values, icbest = adfuller(timeseries)
print("Test statistic: ", adf, 2)
print("P-value: ", pvalue)
print("Critical values: ", critical_values)
# source: notebook 06f_DEMO_SARIMA_Prophet by IBM Specialized Models: Time Series and Survival Analysis course
stations = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/station_list.csv', index_col=0)
dublin_air_data = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/Stations/532_dublin_airport.csv', index_col=0, parse_dates=['date'])
dublin_air_data.rename(columns={'ind': 'i_rain', 'ind.1': 'i_temp', 'ind.2': 'i_wetb', 'ind.3': 'i_wdsp', 'ind.4': 'i_wddir'}, inplace=True)
dublin_air_data.set_index('date', inplace=True)
dublin_air_data.isnull().sum()
dublin_air_data[dublin_air_data.isna().any(axis=1)] | code |
89127402/cell_14 | [
"text_plain_output_1.png"
] | from dateutil.relativedelta import relativedelta
from statsmodels.tsa.stattools import adfuller
import json
import matplotlib.pyplot as plt
import pandas as pd
import plotly.graph_objs as go
import json
import plotly.graph_objs as go
import urllib.request
def read_geojson(url):
with urllib.request.urlopen(url) as url:
jdata = json.loads(url.read().decode())
return jdata
from dateutil.relativedelta import relativedelta
# rolling averages and std
def rolling_stat(timeseries, window_size):
# Determining rolling statistics
rolmean = timeseries.rolling(window = window_size).mean()
rolstd = timeseries.rolling(window = window_size).std()
# Plot rolling statistics:
fig, ax = plt.subplots(figsize = (12, 4))
orig = plt.plot(timeseries, color = '#4DBEEE', label = 'Original')
std = plt.plot(rolstd, color = 'black', label = 'Rolling Std')
mean = plt.plot(rolmean, color = 'red', label = 'Rolling Mean')
plt.legend(loc = 'best')
plt.title('Rolling Mean and Standard Deviation')
plt.grid()
plt.show(block=False)
# get n predictions for series by model
def future_preds_df(model, series, num_steps):
pred_first = series.index.max() + relativedelta(weeks = 1)
pred_last = series.index.max() + relativedelta(weeks = num_steps)
date_range_index = pd.date_range(pred_first, pred_last, freq = 'W')
vals = model.predict(n_periods = num_steps)
return pd.DataFrame(vals,index = date_range_index)
# Augmented Dickey-Fuller Test
def adf_test(timeseries):
adf, pvalue, usedlag, nobs, critical_values, icbest = adfuller(timeseries)
print("Test statistic: ", adf, 2)
print("P-value: ", pvalue)
print("Critical values: ", critical_values)
# source: notebook 06f_DEMO_SARIMA_Prophet by IBM Specialized Models: Time Series and Survival Analysis course
stations = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/station_list.csv', index_col=0)
ireland_url = 'https://gist.githubusercontent.com/pnewall/9a122c05ba2865c3a58f15008548fbbd/raw/5bb4f84d918b871ee0e8b99f60dde976bb711d7c/ireland_counties.geojson'
jdata = read_geojson(ireland_url)
jdata['type']
county_names = [jdata['features'][i]['id'] for i in range(len(jdata['features']))]
colorscale = [[0.0, '#cccba1'], [0.5, '#a1ccaa'], [1.0, '#a1c1cc']]
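# Irish county polygons as a muted choropleth background, with the weather stations overlaid as geo-scatter markers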
trace1 = go.Choropleth(geojson=jdata, showscale=False, colorscale=colorscale, zmin=0, zmax=1, z=[0.5] * len(jdata['features']), locations=county_names, featureidkey='properties.name')
trace2 = go.Scattergeo(lon=stations['longitude_dd'], lat=stations['latitude_dd'], text=stations['Station name'], mode='markers', marker=dict(opacity=0.8, color='blue', reversescale=True, autocolorscale=False, line=dict(width=0.5, color='lightgray')))
fig = go.Figure(data=[trace1, trace2])
fig.update_geos(center=dict(lon=-7.5, lat=53.7), lataxis_range=[51, 56], lonaxis_range=[-13, -6], resolution=50, scope='europe')
fig.update_layout(height=500, margin={'r': 0, 't': 50, 'l': 0, 'b': 0}, title='Locations of weather stations in Ireland')
fig.show() | code |
89127402/cell_22 | [
"text_html_output_1.png"
] | from dateutil.relativedelta import relativedelta
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
import pandas as pd
from dateutil.relativedelta import relativedelta
# rolling averages and std
def rolling_stat(timeseries, window_size):
# Determining rolling statistics
rolmean = timeseries.rolling(window = window_size).mean()
rolstd = timeseries.rolling(window = window_size).std()
# Plot rolling statistics:
fig, ax = plt.subplots(figsize = (12, 4))
orig = plt.plot(timeseries, color = '#4DBEEE', label = 'Original')
std = plt.plot(rolstd, color = 'black', label = 'Rolling Std')
mean = plt.plot(rolmean, color = 'red', label = 'Rolling Mean')
plt.legend(loc = 'best')
plt.title('Rolling Mean and Standard Deviation')
plt.grid()
plt.show(block=False)
# get n predictions for series by model
def future_preds_df(model, series, num_steps):
pred_first = series.index.max() + relativedelta(weeks = 1)
pred_last = series.index.max() + relativedelta(weeks = num_steps)
date_range_index = pd.date_range(pred_first, pred_last, freq = 'W')
vals = model.predict(n_periods = num_steps)
return pd.DataFrame(vals,index = date_range_index)
# Augmented Dickey-Fuller Test
def adf_test(timeseries):
adf, pvalue, usedlag, nobs, critical_values, icbest = adfuller(timeseries)
print("Test statistic: ", adf, 2)
print("P-value: ", pvalue)
print("Critical values: ", critical_values)
# source: notebook 06f_DEMO_SARIMA_Prophet by IBM Specialized Models: Time Series and Survival Analysis course
stations = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/station_list.csv', index_col=0)
dublin_air_data = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/Stations/532_dublin_airport.csv', index_col=0, parse_dates=['date'])
dublin_air_data.rename(columns={'ind': 'i_rain', 'ind.1': 'i_temp', 'ind.2': 'i_wetb', 'ind.3': 'i_wdsp', 'ind.4': 'i_wddir'}, inplace=True)
dublin_air_data.set_index('date', inplace=True)
print('Unique Timestamps in our data: ', dublin_air_data.index.nunique())
print('Total range: ', (dublin_air_data.index.max() - dublin_air_data.index.min()) / pd.Timedelta('1 hour')) | code |
89127402/cell_10 | [
"text_plain_output_1.png"
] | from dateutil.relativedelta import relativedelta
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
import pandas as pd
from dateutil.relativedelta import relativedelta
# rolling averages and std
def rolling_stat(timeseries, window_size):
# Determining rolling statistics
rolmean = timeseries.rolling(window = window_size).mean()
rolstd = timeseries.rolling(window = window_size).std()
# Plot rolling statistics:
fig, ax = plt.subplots(figsize = (12, 4))
orig = plt.plot(timeseries, color = '#4DBEEE', label = 'Original')
std = plt.plot(rolstd, color = 'black', label = 'Rolling Std')
mean = plt.plot(rolmean, color = 'red', label = 'Rolling Mean')
plt.legend(loc = 'best')
plt.title('Rolling Mean and Standard Deviation')
plt.grid()
plt.show(block=False)
# get n predictions for series by model
def future_preds_df(model, series, num_steps):
pred_first = series.index.max() + relativedelta(weeks = 1)
pred_last = series.index.max() + relativedelta(weeks = num_steps)
date_range_index = pd.date_range(pred_first, pred_last, freq = 'W')
vals = model.predict(n_periods = num_steps)
return pd.DataFrame(vals,index = date_range_index)
# Augmented Dickey-Fuller Test
def adf_test(timeseries):
adf, pvalue, usedlag, nobs, critical_values, icbest = adfuller(timeseries)
print("Test statistic: ", adf, 2)
print("P-value: ", pvalue)
print("Critical values: ", critical_values)
# source: notebook 06f_DEMO_SARIMA_Prophet by IBM Specialized Models: Time Series and Survival Analysis course
stations = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/station_list.csv', index_col=0)
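# Convert packed DDMMSS coordinates to decimal degrees; longitudes are negated since Ireland lies west of Greenwich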
stations['latitude_dd'] = stations['Latitude'].apply(lambda x: int(str(x)[-2:]) / 3600 + int(str(x)[-4:-2]) / 60 + int(str(x)[:-4]))
stations['longitude_dd'] = stations['Longitude'].apply(lambda x: (int(str(x)[-2:]) / 3600 + int(str(x)[-4:-2]) / 60 + int(str(x)[:-4])) * -1)
stations | code |
89127402/cell_27 | [
"text_plain_output_1.png"
] | from dateutil.relativedelta import relativedelta
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
import pandas as pd
from dateutil.relativedelta import relativedelta
# rolling averages and std
def rolling_stat(timeseries, window_size):
# Determining rolling statistics
rolmean = timeseries.rolling(window = window_size).mean()
rolstd = timeseries.rolling(window = window_size).std()
# Plot rolling statistics:
fig, ax = plt.subplots(figsize = (12, 4))
orig = plt.plot(timeseries, color = '#4DBEEE', label = 'Original')
std = plt.plot(rolstd, color = 'black', label = 'Rolling Std')
mean = plt.plot(rolmean, color = 'red', label = 'Rolling Mean')
plt.legend(loc = 'best')
plt.title('Rolling Mean and Standard Deviation')
plt.grid()
plt.show(block=False)
# get n predictions for series by model
def future_preds_df(model, series, num_steps):
pred_first = series.index.max() + relativedelta(weeks = 1)
pred_last = series.index.max() + relativedelta(weeks = num_steps)
date_range_index = pd.date_range(pred_first, pred_last, freq = 'W')
vals = model.predict(n_periods = num_steps)
return pd.DataFrame(vals,index = date_range_index)
# Augmented Dickey-Fuller Test
def adf_test(timeseries):
adf, pvalue, usedlag, nobs, critical_values, icbest = adfuller(timeseries)
print("Test statistic: ", adf, 2)
print("P-value: ", pvalue)
print("Critical values: ", critical_values)
# source: notebook 06f_DEMO_SARIMA_Prophet by IBM Specialized Models: Time Series and Survival Analysis course
stations = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/station_list.csv', index_col=0)
dublin_air_data = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/Stations/532_dublin_airport.csv', index_col=0, parse_dates=['date'])
dublin_air_data.rename(columns={'ind': 'i_rain', 'ind.1': 'i_temp', 'ind.2': 'i_wetb', 'ind.3': 'i_wdsp', 'ind.4': 'i_wddir'}, inplace=True)
dublin_air_data.set_index('date', inplace=True)
dublin_air_data.isnull().sum()
dublin_air_data[dublin_air_data.isna().any(axis=1)]
dublin_air_data.interpolate(inplace=True)
dublin_air_data.isnull().sum().sum()
dublin_air_data.drop(columns=['i_rain', 'i_temp', 'i_wetb', 'i_wdsp', 'i_wddir'], inplace=True)
dublin_air_data.nunique()
dublin_air_data.describe().T | code |
89127402/cell_12 | [
"text_html_output_1.png"
] | import json
import json
import plotly.graph_objs as go
import urllib.request
def read_geojson(url):
with urllib.request.urlopen(url) as url:
jdata = json.loads(url.read().decode())
return jdata
ireland_url = 'https://gist.githubusercontent.com/pnewall/9a122c05ba2865c3a58f15008548fbbd/raw/5bb4f84d918b871ee0e8b99f60dde976bb711d7c/ireland_counties.geojson'
jdata = read_geojson(ireland_url)
jdata['type'] | code |
49124403/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/jane-street-market-prediction/train.csv')
df_feature = pd.read_csv('../input/jane-street-market-prediction/features.csv')
df_test = pd.read_csv('../input/jane-street-market-prediction/example_test.csv')
df_sub = pd.read_csv('../input/jane-street-market-prediction/example_sample_submission.csv')
(df_train.shape, df_feature.shape, df_test.shape, df_sub.shape)
df_train.isnull().sum() | code |
49124403/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
49124403/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/jane-street-market-prediction/train.csv')
df_feature = pd.read_csv('../input/jane-street-market-prediction/features.csv')
df_test = pd.read_csv('../input/jane-street-market-prediction/example_test.csv')
df_sub = pd.read_csv('../input/jane-street-market-prediction/example_sample_submission.csv')
(df_train.shape, df_feature.shape, df_test.shape, df_sub.shape)
df_train.isnull().sum()
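# Drop rows that have missing values in selected feature columns, one column at a time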
df_train = df_train.dropna(axis=0, subset=['feature_129'])
df_train = df_train.dropna(axis=0, subset=['feature_127'])
df_train = df_train.dropna(axis=0, subset=['feature_125'])
df_train = df_train.dropna(axis=0, subset=['feature_121'])
df_train = df_train.dropna(axis=0, subset=['feature_123'])
df_train = df_train.dropna(axis=0, subset=['feature_118'])
df_train = df_train.dropna(axis=0, subset=['feature_118'])
df_train = df_train.dropna(axis=0, subset=['feature_117'])
df_train = df_train.dropna(axis=0, subset=['feature_110'])
df_train = df_train.dropna(axis=0, subset=['feature_93'])
df_train = df_train.dropna(axis=0, subset=['feature_59'])
df_train = df_train.dropna(axis=0, subset=['feature_58'])
df_train = df_train.dropna(axis=0, subset=['feature_56'])
df_train = df_train.dropna(axis=0, subset=['feature_55'])
df_train = df_train.dropna(axis=0, subset=['feature_45'])
df_train = df_train.dropna(axis=0, subset=['feature_31'])
df_train = df_train.dropna(axis=0, subset=['feature_21'])
df_train = df_train.dropna(axis=0, subset=['feature_3'])
df_train.shape | code |
49124403/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/jane-street-market-prediction/train.csv')
df_feature = pd.read_csv('../input/jane-street-market-prediction/features.csv')
df_test = pd.read_csv('../input/jane-street-market-prediction/example_test.csv')
df_sub = pd.read_csv('../input/jane-street-market-prediction/example_sample_submission.csv')
(df_train.shape, df_feature.shape, df_test.shape, df_sub.shape)
df_train.isnull().sum()
df_train = df_train.dropna(axis=0, subset=['feature_129'])
df_train = df_train.dropna(axis=0, subset=['feature_127'])
df_train = df_train.dropna(axis=0, subset=['feature_125'])
df_train = df_train.dropna(axis=0, subset=['feature_121'])
df_train = df_train.dropna(axis=0, subset=['feature_123'])
df_train = df_train.dropna(axis=0, subset=['feature_118'])
df_train = df_train.dropna(axis=0, subset=['feature_118'])
df_train = df_train.dropna(axis=0, subset=['feature_117'])
df_train = df_train.dropna(axis=0, subset=['feature_110'])
df_train = df_train.dropna(axis=0, subset=['feature_93'])
df_train = df_train.dropna(axis=0, subset=['feature_59'])
df_train = df_train.dropna(axis=0, subset=['feature_58'])
df_train = df_train.dropna(axis=0, subset=['feature_56'])
df_train = df_train.dropna(axis=0, subset=['feature_55'])
df_train = df_train.dropna(axis=0, subset=['feature_45'])
df_train = df_train.dropna(axis=0, subset=['feature_31'])
df_train = df_train.dropna(axis=0, subset=['feature_21'])
df_train = df_train.dropna(axis=0, subset=['feature_3'])
df_train.shape
df_train.isnull().sum() | code |
49124403/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/jane-street-market-prediction/train.csv')
df_feature = pd.read_csv('../input/jane-street-market-prediction/features.csv')
df_test = pd.read_csv('../input/jane-street-market-prediction/example_test.csv')
df_sub = pd.read_csv('../input/jane-street-market-prediction/example_sample_submission.csv')
(df_train.shape, df_feature.shape, df_test.shape, df_sub.shape) | code |
330932/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_countries = pd.read_csv('../input/Country.csv')
df_indicators = pd.read_csv('../input/Indicators.csv')
df_series = pd.read_csv('../input/Series.csv')
df_countries = pd.read_csv('../input/Country.csv')
df_indicators = pd.read_csv('../input/Indicators.csv')
df_series = pd.read_csv('../input/Series.csv')
df_countries[df_countries.CountryCode == 'IDN'] | code |
330932/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_countries = pd.read_csv('../input/Country.csv')
df_indicators = pd.read_csv('../input/Indicators.csv')
df_series = pd.read_csv('../input/Series.csv')
df_indicators[df_indicators.CountryName == 'Indonesia'].drop_duplicates('IndicatorCode') | code |
330932/cell_6 | [
"text_html_output_1.png"
] | from subprocess import check_output
from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
330932/cell_2 | [
"text_html_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
330932/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_countries = pd.read_csv('../input/Country.csv')
df_indicators = pd.read_csv('../input/Indicators.csv')
df_series = pd.read_csv('../input/Series.csv')
df_indicators[df_indicators.CountryName == 'Indonesia'].drop_duplicates('IndicatorCode')
len(df_indicators[df_indicators.CountryName == 'Indonesia'])
df_countries = pd.read_csv('../input/Country.csv')
df_indicators = pd.read_csv('../input/Indicators.csv')
df_series = pd.read_csv('../input/Series.csv')
df_indicators[df_indicators.CountryName == 'Indonesia'].drop_duplicates('IndicatorCode') | code |
330932/cell_10 | [
"text_plain_output_1.png"
] | import sqlite3
import sqlite3
sqlite_file = '../input/database.sqlite'
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table'")
all_rows = c.fetchall()
print('1):', all_rows)
conn.close() | code |
330932/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_countries = pd.read_csv('../input/Country.csv')
df_indicators = pd.read_csv('../input/Indicators.csv')
df_series = pd.read_csv('../input/Series.csv')
df_indicators[df_indicators.CountryName == 'Indonesia'].drop_duplicates('IndicatorCode')
len(df_indicators[df_indicators.CountryName == 'Indonesia']) | code |
18128922/cell_9 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | w2v_model = gensim.models.word2vec.Word2Vec(size=W2V_SIZE, window=W2V_WINDOW, min_count=W2V_MIN_COUNT, workers=8) | code |
18128922/cell_4 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
DATASET_COLUMNS = ['target', 'ids', 'date', 'flag', 'user', 'text']
DATASET_ENCODING = 'ISO-8859-1'
TRAIN_SIZE = 0.8
TEXT_CLEANING_RE = '@\\S+|https?:\\S+|http?:\\S|[^A-Za-z0-9]+'
W2V_SIZE = 300
W2V_WINDOW = 7
W2V_EPOCH = 32
W2V_MIN_COUNT = 10
SEQUENCE_LENGTH = 300
EPOCHS = 8
BATCH_SIZE = 1024
POSITIVE = 'POSITIVE'
NEGATIVE = 'NEGATIVE'
NEUTRAL = 'NEUTRAL'
SENTIMENT_THRESHOLDS = (0.4, 0.7)
KERAS_MODEL = 'model.h5'
WORD2VEC_MODEL = 'model.w2v'
TOKENIZER_MODEL = 'tokenizer.pkl'
ENCODER_MODEL = 'encoder.pkl'
"""
Dataset details
target: the polarity of the tweet (0 = negative, 2 = neutral, 4 = positive)
ids: The id of the tweet ( 2087)
date: the date of the tweet (Sat May 16 23:58:44 UTC 2009)
flag: The query (lyx). If there is no query, then this value is NO_QUERY.
user: the user that tweeted (robotickilldozr)
text: the text of the tweet (Lyx is cool)
"""
dataset_filename = os.listdir('../input')[0]
dataset_path = os.path.join('..', 'input', dataset_filename)
print('Open file:', dataset_path)
df = pd.read_csv(dataset_path, encoding=DATASET_ENCODING, names=DATASET_COLUMNS)
print('Dataset size:', len(df)) | code |
18128922/cell_6 | [
"text_plain_output_1.png"
] | decode_map = {0: 'NEGATIVE', 2: 'NEUTRAL', 4: 'POSITIVE'}
def decode_sentiment(label):
return decode_map[int(label)]
df.target = df.target.apply(lambda x: decode_sentiment(x)) | code |
18128922/cell_2 | [
"text_plain_output_1.png"
] | import nltk
nltk.download('stopwords') | code |
18128922/cell_11 | [
"text_plain_output_1.png"
] | words = w2v_model.wv.vocab.keys()
vocab_size = len(words)
print('Vocab size', vocab_size) | code |
18128922/cell_1 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from sklearn.manifold import TSNE
from sklearn.feature_extraction.text import TfidfVectorizer
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Conv1D, MaxPooling1D, LSTM
from keras import utils
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import gensim
import re
import numpy as np
import os
from collections import Counter
import logging
import time
import pickle
import itertools
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
print(os.listdir('../input')) | code |
18128922/cell_7 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
DATASET_COLUMNS = ['target', 'ids', 'date', 'flag', 'user', 'text']
DATASET_ENCODING = 'ISO-8859-1'
TRAIN_SIZE = 0.8
TEXT_CLEANING_RE = '@\\S+|https?:\\S+|http?:\\S|[^A-Za-z0-9]+'
W2V_SIZE = 300
W2V_WINDOW = 7
W2V_EPOCH = 32
W2V_MIN_COUNT = 10
SEQUENCE_LENGTH = 300
EPOCHS = 8
BATCH_SIZE = 1024
POSITIVE = 'POSITIVE'
NEGATIVE = 'NEGATIVE'
NEUTRAL = 'NEUTRAL'
SENTIMENT_THRESHOLDS = (0.4, 0.7)
KERAS_MODEL = 'model.h5'
WORD2VEC_MODEL = 'model.w2v'
TOKENIZER_MODEL = 'tokenizer.pkl'
ENCODER_MODEL = 'encoder.pkl'
"""
Dataset details
target: the polarity of the tweet (0 = negative, 2 = neutral, 4 = positive)
ids: The id of the tweet ( 2087)
date: the date of the tweet (Sat May 16 23:58:44 UTC 2009)
flag: The query (lyx). If there is no query, then this value is NO_QUERY.
user: the user that tweeted (robotickilldozr)
text: the text of the tweet (Lyx is cool)
"""
dataset_filename = os.listdir('../input')[0]
dataset_path = os.path.join('..', 'input', dataset_filename)
df = pd.read_csv(dataset_path, encoding=DATASET_ENCODING, names=DATASET_COLUMNS)
df_train, df_test = train_test_split(df, test_size=1 - TRAIN_SIZE, random_state=42)
print('TRAIN size:', len(df_train))
print('TEST size:', len(df_test)) | code |
18128922/cell_8 | [
"text_plain_output_1.png"
] | documents = [_text.split() for _text in df_train.text]
print('training tweets count', len(documents)) | code |
18128922/cell_10 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | w2v_model.build_vocab(documents) | code |
18128922/cell_12 | [
"text_html_output_1.png"
] | w2v_model.train(documents, total_examples=len(documents), epochs=W2V_EPOCH) | code |
18128922/cell_5 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
DATASET_COLUMNS = ['target', 'ids', 'date', 'flag', 'user', 'text']
DATASET_ENCODING = 'ISO-8859-1'
TRAIN_SIZE = 0.8
TEXT_CLEANING_RE = '@\\S+|https?:\\S+|http?:\\S|[^A-Za-z0-9]+'
W2V_SIZE = 300
W2V_WINDOW = 7
W2V_EPOCH = 32
W2V_MIN_COUNT = 10
SEQUENCE_LENGTH = 300
EPOCHS = 8
BATCH_SIZE = 1024
POSITIVE = 'POSITIVE'
NEGATIVE = 'NEGATIVE'
NEUTRAL = 'NEUTRAL'
SENTIMENT_THRESHOLDS = (0.4, 0.7)
KERAS_MODEL = 'model.h5'
WORD2VEC_MODEL = 'model.w2v'
TOKENIZER_MODEL = 'tokenizer.pkl'
ENCODER_MODEL = 'encoder.pkl'
"""
Dataset details
target: the polarity of the tweet (0 = negative, 2 = neutral, 4 = positive)
ids: The id of the tweet ( 2087)
date: the date of the tweet (Sat May 16 23:58:44 UTC 2009)
flag: The query (lyx). If there is no query, then this value is NO_QUERY.
user: the user that tweeted (robotickilldozr)
text: the text of the tweet (Lyx is cool)
"""
dataset_filename = os.listdir('../input')[0]
dataset_path = os.path.join('..', 'input', dataset_filename)
df = pd.read_csv(dataset_path, encoding=DATASET_ENCODING, names=DATASET_COLUMNS)
df.head(5) | code |
33096179/cell_13 | [
"text_html_output_1.png"
] | from past.builtins import xrange
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
webpth = 'http://www.files.benlaken.com/documents/'
monsoon = pd.read_csv('../input/Monsoon_data.csv', parse_dates=['Date'])
monsoon.index = monsoon.Date
monsoon = monsoon.drop('Date', 1)
olou = pd.read_csv('../input/Olou_counts.csv', parse_dates=['Date'])
olou.index = olou.Date
olou = olou.drop('Date', 1)
# Plot the simple time series
my_ts = plt.figure()
my_ts.set_size_inches(10,5) # Specify the output size
ax1 = my_ts.add_subplot(211) # Add an axis frame object to the plot (i.e. a panel)
ax2 = my_ts.add_subplot(212)
ax1.step(monsoon.index.date,monsoon.Precip,lw=1.0)
ax1.set_title(r'Monthly Precipitation and NM counts')
ax1.set_ylabel(r'Precipitation (mm)')
ax1.grid(True)
#ax1.set_yscale('log')
ax2.plot(olou.index.date,olou.Counts/1000,'r.',ms=3.0)
ax2.set_ylabel(r'Olou NM (cnt./min.$\times10^{3}$)')
ax2.set_xlabel('Date')
ax2.grid(True)
plt.show(my_ts)
my_ts.savefig('Monthly_ts.png',dpi=300)
def return_stderr(data):
"""Calculate uncertainty of a np array as Standard Error of the Mean"""
return np.nanstd(data) / np.sqrt(np.count_nonzero(data) - 1)
climo = {}
climo['means'] = [np.mean(monsoon.Precip[monsoon.index.month == mnth + 1]) for mnth in xrange(12)]
climo['error'] = [return_stderr(monsoon.Precip[monsoon.index.month == mnth + 1].values) for mnth in xrange(12)]
# -- Plot the climatology --
my_climo = plt.figure()
my_climo.set_size_inches(5,5)
ax1 = my_climo.add_subplot(111)
ax1.errorbar(x=range(12),y=climo['means'],yerr=climo['error'])
ax1.set_title(r'Precipitation climatology')
ax1.set_ylabel(r'Precipitation (mm)')
ax1.set_xlabel(r'Month')
ax1.set_xlim(0,11)
ax1.set_xticklabels(labels=['Jan','Mar','May','Jul','Sep','Nov'])
ax1.grid(True)
plt.show(my_climo)
my_climo.savefig('Monthly_climo.png',dpi=300)
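# Precipitation anomaly: subtract the monthly climatological mean from each monthly observation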
delta = []
for date in monsoon.Precip.index:
delta.append(monsoon.Precip[date] - climo['means'][date.month - 1])
dseries = pd.Series(delta, index=monsoon.index)
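# Per-year statistics of the May-September (MJJAS) monsoon-season anomalies, 1964-2011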
def lookup_index(yr):
return (monsoon.index.year == yr) & (monsoon.index.month >= 5) & (monsoon.index.month <= 9)
mjjas = {}
mjjas['means'] = [np.mean(dseries[lookup_index(yr)]) for yr in xrange(1964, 2012, 1)]
mjjas['SEM'] = [return_stderr(dseries[lookup_index(yr)]) for yr in xrange(1964, 2012, 1)]
mjjas['sum'] = [np.sum(dseries[lookup_index(yr)]) for yr in xrange(1964, 2012, 1)]
sns.set(style='darkgrid')
yrange = xrange(1964, 2012, 1)
my_mjjas = plt.figure()
my_mjjas.set_size_inches(10, 5)
ax1 = my_mjjas.add_subplot(121)
ax2 = my_mjjas.add_subplot(122)
ax1.errorbar(x=yrange, y=mjjas['means'], yerr=mjjas['SEM'], fmt='.', ms=10)
ax1.set_xlim(min(yrange) - 1, max(yrange) + 1)
ax1.set_title('Mean MJJAS precipitation anomaly')
ax1.set_ylabel('$\\delta$ precipitation (mm/month)')
ax1.set_xlabel('Year')
ax1.grid(True)
sns.distplot(mjjas['means'], ax=ax2)
ax2.set_title('Distribution of MJJAS anomalies')
ax2.set_xlabel('$\\delta$ precipitation (mm/month)')
ax2.set_ylabel('Density')
plt.show(my_mjjas)
my_mjjas.savefig('delta_precip_pop.png', dpi=300) | code |
33096179/cell_6 | [
"image_output_1.png"
] | import pandas as pd
webpth = 'http://www.files.benlaken.com/documents/'
monsoon = pd.read_csv('../input/Monsoon_data.csv', parse_dates=['Date'])
monsoon.index = monsoon.Date
monsoon = monsoon.drop('Date', 1)
olou = pd.read_csv('../input/Olou_counts.csv', parse_dates=['Date'])
olou.index = olou.Date
olou = olou.drop('Date', 1)
monsoon.head() | code |
33096179/cell_2 | [
"image_output_1.png"
] | from __future__ import print_function, division, generators
import sys
print('Running Python {0}.{1}'.format(sys.version_info[:2][0], sys.version_info[:2][1]))
if sys.version_info[:2] > (3, 0):
print('Adding xrange for backwards compatibility'.format(sys.version_info[:2][0], sys.version_info[:2][1]))
from past.builtins import xrange
from scipy.stats.stats import pearsonr
import pandas as pd
import datetime as dt
from scipy.stats import kendalltau
import seaborn as sns
from random import randrange
sns.set(style='darkgrid') | code |
33096179/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd
webpth = 'http://www.files.benlaken.com/documents/'
monsoon = pd.read_csv('../input/Monsoon_data.csv', parse_dates=['Date'])
monsoon.index = monsoon.Date
monsoon = monsoon.drop('Date', 1)
olou = pd.read_csv('../input/Olou_counts.csv', parse_dates=['Date'])
olou.index = olou.Date
olou = olou.drop('Date', 1)
monsoon.describe() | code |
33096179/cell_8 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
webpth = 'http://www.files.benlaken.com/documents/'
monsoon = pd.read_csv('../input/Monsoon_data.csv', parse_dates=['Date'])
monsoon.index = monsoon.Date
monsoon = monsoon.drop('Date', 1)
olou = pd.read_csv('../input/Olou_counts.csv', parse_dates=['Date'])
olou.index = olou.Date
olou = olou.drop('Date', 1)
my_ts = plt.figure()
my_ts.set_size_inches(10, 5)
ax1 = my_ts.add_subplot(211)
ax2 = my_ts.add_subplot(212)
ax1.step(monsoon.index.date, monsoon.Precip, lw=1.0)
ax1.set_title('Monthly Precipitation and NM counts')
ax1.set_ylabel('Precipitation (mm)')
ax1.grid(True)
ax2.plot(olou.index.date, olou.Counts / 1000, 'r.', ms=3.0)
ax2.set_ylabel('Olou NM (cnt./min.$\\times10^{3}$)')
ax2.set_xlabel('Date')
ax2.grid(True)
plt.show(my_ts)
my_ts.savefig('Monthly_ts.png', dpi=300) | code |
33096179/cell_10 | [
"text_plain_output_1.png"
] | from past.builtins import xrange
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
webpth = 'http://www.files.benlaken.com/documents/'
monsoon = pd.read_csv('../input/Monsoon_data.csv', parse_dates=['Date'])
monsoon.index = monsoon.Date
monsoon = monsoon.drop('Date', 1)
olou = pd.read_csv('../input/Olou_counts.csv', parse_dates=['Date'])
olou.index = olou.Date
olou = olou.drop('Date', 1)
# Plot the simple time series
my_ts = plt.figure()
my_ts.set_size_inches(10,5) # Specify the output size
ax1 = my_ts.add_subplot(211) # Add an axis frame object to the plot (i.e. a panel)
ax2 = my_ts.add_subplot(212)
ax1.step(monsoon.index.date,monsoon.Precip,lw=1.0)
ax1.set_title(r'Monthly Precipitation and NM counts')
ax1.set_ylabel(r'Precipitation (mm)')
ax1.grid(True)
#ax1.set_yscale('log')
ax2.plot(olou.index.date,olou.Counts/1000,'r.',ms=3.0)
ax2.set_ylabel(r'Olou NM (cnt./min.$\times10^{3}$)')
ax2.set_xlabel('Date')
ax2.grid(True)
plt.show(my_ts)
my_ts.savefig('Monthly_ts.png',dpi=300)
def return_stderr(data):
"""Calculate uncertainty of a np array as Standard Error of the Mean"""
return np.nanstd(data) / np.sqrt(np.count_nonzero(data) - 1)
climo = {}
climo['means'] = [np.mean(monsoon.Precip[monsoon.index.month == mnth + 1]) for mnth in xrange(12)]
climo['error'] = [return_stderr(monsoon.Precip[monsoon.index.month == mnth + 1].values) for mnth in xrange(12)]
my_climo = plt.figure()
my_climo.set_size_inches(5, 5)
ax1 = my_climo.add_subplot(111)
ax1.errorbar(x=range(12), y=climo['means'], yerr=climo['error'])
ax1.set_title('Precipitation climatology')
ax1.set_ylabel('Precipitation (mm)')
ax1.set_xlabel('Month')
ax1.set_xlim(0, 11)
ax1.set_xticklabels(labels=['Jan', 'Mar', 'May', 'Jul', 'Sep', 'Nov'])
ax1.grid(True)
plt.show(my_climo)
my_climo.savefig('Monthly_climo.png', dpi=300) | code |
73061721/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dtrain = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
dtrain['MSZoning'] | code |
73061721/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73061721/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dtrain = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
dtrain['MSZoning']
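# NOTE: 'missing_data' (a per-column missing-value summary with a 'Total' column) is assumed to be built in an earlier cell not shown here; without it this cell raises a NameError (see the stderr output)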
dtrain = dtrain.drop(missing_data[missing_data['Total'] > 1].index, 1)
dtrain['Electrical'] = dtrain['Electrical'].fillna(dtrain['Electrical'].mode()[0])
dtrain.isnull().sum().max() | code |
130001346/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
train.describe(include='all') | code |
130001346/cell_25 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
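# Combine Parch and SibSp into a single family-composition key and compare mean survival per group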
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived']].groupby(['Parch_SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False) | code |
130001346/cell_33 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
def groupby_mean_sort(df, col):
return df[[col, 'Survived']].groupby([col], as_index=False).mean().sort_values(by='Survived', ascending=False)
list_for_groupby_mean_sort = ['Pclass', 'Age', 'SibSp', 'Parch', 'Sex', 'Cabin', 'Embarked']
groupby_mean_sort(train, 'SibSp')
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived']].groupby(['Parch_SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived', 'Pclass', 'Age']].groupby(['Parch_SibSp'], as_index=False).median().sort_values(by='Survived', ascending=False)
train.assign(Sex_Pclass=train.Sex.astype(str) + ':' + train.Pclass.astype(str))[['Sex_Pclass', 'Survived', 'Pclass', 'Age']].groupby(['Sex_Pclass'], as_index=False).mean(numeric_only=True).sort_values(by='Survived', ascending=False)
groupby_mean_sort(train, 'Embarked') | code |
130001346/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
def groupby_mean_sort(df, col):
return df[[col, 'Survived']].groupby([col], as_index=False).mean().sort_values(by='Survived', ascending=False)
list_for_groupby_mean_sort = ['Pclass', 'Age', 'SibSp', 'Parch', 'Sex', 'Cabin', 'Embarked']
groupby_mean_sort(train, 'SibSp') | code |
130001346/cell_40 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived']].groupby(['Parch_SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived', 'Pclass', 'Age']].groupby(['Parch_SibSp'], as_index=False).median().sort_values(by='Survived', ascending=False)
train.assign(Sex_Pclass=train.Sex.astype(str) + ':' + train.Pclass.astype(str))[['Sex_Pclass', 'Survived', 'Pclass', 'Age']].groupby(['Sex_Pclass'], as_index=False).mean(numeric_only=True).sort_values(by='Survived', ascending=False)
train.assign(Embarked_Sex_Pclass=train.Embarked + ':' + train.Sex + ':' + train.Pclass.astype(str))[['Embarked_Sex_Pclass', 'Survived', 'Age']].groupby(['Embarked_Sex_Pclass'], as_index=False).mean(numeric_only=True).sort_values(by='Survived', ascending=False)
import seaborn as sns
import matplotlib.pyplot as plt
g = sns.FacetGrid(train, col='Survived')
g.map(plt.hist, 'Age', bins=20) | code |
130001346/cell_29 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
def groupby_mean_sort(df, col):
return df[[col, 'Survived']].groupby([col], as_index=False).mean().sort_values(by='Survived', ascending=False)
list_for_groupby_mean_sort = ['Pclass', 'Age', 'SibSp', 'Parch', 'Sex', 'Cabin', 'Embarked']
groupby_mean_sort(train, 'SibSp')
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived']].groupby(['Parch_SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived', 'Pclass', 'Age']].groupby(['Parch_SibSp'], as_index=False).median().sort_values(by='Survived', ascending=False)
groupby_mean_sort(train, 'Sex') | code |
130001346/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
train.head(10) | code |
130001346/cell_45 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived']].groupby(['Parch_SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived', 'Pclass', 'Age']].groupby(['Parch_SibSp'], as_index=False).median().sort_values(by='Survived', ascending=False)
train.assign(Sex_Pclass=train.Sex.astype(str) + ':' + train.Pclass.astype(str))[['Sex_Pclass', 'Survived', 'Pclass', 'Age']].groupby(['Sex_Pclass'], as_index=False).mean(numeric_only=True).sort_values(by='Survived', ascending=False)
train.assign(Embarked_Sex_Pclass=train.Embarked + ':' + train.Sex + ':' + train.Pclass.astype(str))[['Embarked_Sex_Pclass', 'Survived', 'Age']].groupby(['Embarked_Sex_Pclass'], as_index=False).mean(numeric_only=True).sort_values(by='Survived', ascending=False)
import seaborn as sns
import matplotlib.pyplot as plt
g = sns.FacetGrid(train, col='Survived')
g.map(plt.hist, 'Age', bins=20)
grid = sns.FacetGrid(train, col='Survived', row='Pclass', aspect=1.6)
grid.map(plt.hist, 'Age', alpha=0.5, bins=20)
grid.add_legend()
grid = sns.FacetGrid(train, row='Embarked', aspect=1.6)
grid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')
grid.add_legend() | code |
130001346/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False) | code |
130001346/cell_47 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived']].groupby(['Parch_SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived', 'Pclass', 'Age']].groupby(['Parch_SibSp'], as_index=False).median().sort_values(by='Survived', ascending=False)
train.assign(Sex_Pclass=train.Sex.astype(str) + ':' + train.Pclass.astype(str))[['Sex_Pclass', 'Survived', 'Pclass', 'Age']].groupby(['Sex_Pclass'], as_index=False).mean(numeric_only=True).sort_values(by='Survived', ascending=False)
train.assign(Embarked_Sex_Pclass=train.Embarked + ':' + train.Sex + ':' + train.Pclass.astype(str))[['Embarked_Sex_Pclass', 'Survived', 'Age']].groupby(['Embarked_Sex_Pclass'], as_index=False).mean(numeric_only=True).sort_values(by='Survived', ascending=False)
import seaborn as sns
import matplotlib.pyplot as plt
g = sns.FacetGrid(train, col='Survived')
g.map(plt.hist, 'Age', bins=20)
grid = sns.FacetGrid(train, col='Survived', row='Pclass', aspect=1.6)
grid.map(plt.hist, 'Age', alpha=0.5, bins=20)
grid.add_legend()
grid = sns.FacetGrid(train, row='Embarked', aspect=1.6)
grid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')
grid.add_legend()
grid = sns.FacetGrid(train, row='Embarked', col='Survived', aspect=1.6)
grid.map(sns.barplot, 'Sex', 'Fare', alpha=0.5, ci=None)
grid.add_legend() | code |
130001346/cell_35 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived']].groupby(['Parch_SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived', 'Pclass', 'Age']].groupby(['Parch_SibSp'], as_index=False).median().sort_values(by='Survived', ascending=False)
train.assign(Sex_Pclass=train.Sex.astype(str) + ':' + train.Pclass.astype(str))[['Sex_Pclass', 'Survived', 'Pclass', 'Age']].groupby(['Sex_Pclass'], as_index=False).mean(numeric_only=True).sort_values(by='Survived', ascending=False)
train.assign(Embarked_Sex_Pclass=train.Embarked + ':' + train.Sex + ':' + train.Pclass.astype(str))[['Embarked_Sex_Pclass', 'Survived', 'Age']].groupby(['Embarked_Sex_Pclass'], as_index=False).mean(numeric_only=True).sort_values(by='Survived', ascending=False) | code |
130001346/cell_43 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived']].groupby(['Parch_SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived', 'Pclass', 'Age']].groupby(['Parch_SibSp'], as_index=False).median().sort_values(by='Survived', ascending=False)
train.assign(Sex_Pclass=train.Sex.astype(str) + ':' + train.Pclass.astype(str))[['Sex_Pclass', 'Survived', 'Pclass', 'Age']].groupby(['Sex_Pclass'], as_index=False).mean(numeric_only=True).sort_values(by='Survived', ascending=False)
train.assign(Embarked_Sex_Pclass=train.Embarked + ':' + train.Sex + ':' + train.Pclass.astype(str))[['Embarked_Sex_Pclass', 'Survived', 'Age']].groupby(['Embarked_Sex_Pclass'], as_index=False).mean(numeric_only=True).sort_values(by='Survived', ascending=False)
import seaborn as sns
import matplotlib.pyplot as plt
g = sns.FacetGrid(train, col='Survived')
g.map(plt.hist, 'Age', bins=20)
grid = sns.FacetGrid(train, col='Survived', row='Pclass', aspect=1.6)
grid.map(plt.hist, 'Age', alpha=0.5, bins=20)
grid.add_legend() | code |
130001346/cell_31 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived']].groupby(['Parch_SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived', 'Pclass', 'Age']].groupby(['Parch_SibSp'], as_index=False).median().sort_values(by='Survived', ascending=False)
train.assign(Sex_Pclass=train.Sex.astype(str) + ':' + train.Pclass.astype(str))[['Sex_Pclass', 'Survived', 'Pclass', 'Age']].groupby(['Sex_Pclass'], as_index=False).mean(numeric_only=True).sort_values(by='Survived', ascending=False) | code |
130001346/cell_22 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
def groupby_mean_sort(df, col):
return df[[col, 'Survived']].groupby([col], as_index=False).mean().sort_values(by='Survived', ascending=False)
list_for_groupby_mean_sort = ['Pclass', 'Age', 'SibSp', 'Parch', 'Sex', 'Cabin', 'Embarked']
groupby_mean_sort(train, 'SibSp')
groupby_mean_sort(train, 'Parch') | code |
130001346/cell_53 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived']].groupby(['Parch_SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived', 'Pclass', 'Age']].groupby(['Parch_SibSp'], as_index=False).median().sort_values(by='Survived', ascending=False)
train.assign(Sex_Pclass=train.Sex.astype(str) + ':' + train.Pclass.astype(str))[['Sex_Pclass', 'Survived', 'Pclass', 'Age']].groupby(['Sex_Pclass'], as_index=False).mean(numeric_only=True).sort_values(by='Survived', ascending=False)
train.assign(Embarked_Sex_Pclass=train.Embarked + ':' + train.Sex + ':' + train.Pclass.astype(str))[['Embarked_Sex_Pclass', 'Survived', 'Age']].groupby(['Embarked_Sex_Pclass'], as_index=False).mean(numeric_only=True).sort_values(by='Survived', ascending=False)
combine = [train, test]
print('Before', train.shape, test.shape, combine[0].shape, combine[1].shape)
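# Drop identifier-like columns from the copies held in 'combine' (train and test themselves are left unchanged)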
for i, df in enumerate(combine):
df = df.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1)
combine[i] = df
print('After', train.shape, test.shape, combine[0].shape, combine[1].shape) | code |
130001346/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
train.info()
test.info() | code |
130001346/cell_27 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived']].groupby(['Parch_SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived', 'Pclass', 'Age']].groupby(['Parch_SibSp'], as_index=False).median().sort_values(by='Survived', ascending=False) | code |
130001346/cell_37 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
gender_submission_df = pd.read_csv('../input/titanic/gender_submission.csv')
def groupby_mean_sort(df, col):
return df[[col, 'Survived']].groupby([col], as_index=False).mean().sort_values(by='Survived', ascending=False)
list_for_groupby_mean_sort = ['Pclass', 'Age', 'SibSp', 'Parch', 'Sex', 'Cabin', 'Embarked']
groupby_mean_sort(train, 'SibSp')
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived']].groupby(['Parch_SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Parch_and_SibSp_col = train.Parch.astype(str) + ':' + train.SibSp.astype(str)
train.assign(Parch_SibSp=Parch_and_SibSp_col)[['Parch_SibSp', 'Survived', 'Pclass', 'Age']].groupby(['Parch_SibSp'], as_index=False).median().sort_values(by='Survived', ascending=False)
train.assign(Sex_Pclass=train.Sex.astype(str) + ':' + train.Pclass.astype(str))[['Sex_Pclass', 'Survived', 'Pclass', 'Age']].groupby(['Sex_Pclass'], as_index=False).mean(numeric_only=True).sort_values(by='Survived', ascending=False)
train.assign(Embarked_Sex_Pclass=train.Embarked + ':' + train.Sex + ':' + train.Pclass.astype(str))[['Embarked_Sex_Pclass', 'Survived', 'Age']].groupby(['Embarked_Sex_Pclass'], as_index=False).mean(numeric_only=True).sort_values(by='Survived', ascending=False)
groupby_mean_sort(train, 'Age') | code |
104127568/cell_21 | [
"text_html_output_2.png",
"text_html_output_1.png",
"text_html_output_3.png"
] | from datetime import timedelta
import pandas as pd
df = pd.read_csv('../input/flo-data2/flo_data_20k.csv')
date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
today_date = df['last_order_date'].max() + timedelta(days=2)
rfm_df = df.groupby('master_id').agg({'last_order_date': lambda date: (today_date - date.max()).days, 'total_order_num': lambda num: num.sum(), 'total_value': lambda value: value.sum()})
rfm_df.columns = ['recency', 'frequency', 'monetary']
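# score each RFM dimension into quintiles (5 = best); recency labels are reversed so fewer days since the last order scores higher, and rank(method='first') breaks ties so qcut sees unique values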
rfm_df['RECENCY_SCORE'] = pd.qcut(rfm_df['recency'], 5, labels=[5, 4, 3, 2, 1])
rfm_df['FREQUENCY_SCORE'] = pd.qcut(rfm_df['frequency'].rank(method='first'), 5, labels=[1, 2, 3, 4, 5])
rfm_df['MONETARY_SCORE'] = pd.qcut(rfm_df['monetary'], 5, labels=[1, 2, 3, 4, 5])
rfm_df['RF_SCORE'] = rfm_df[['RECENCY_SCORE', 'FREQUENCY_SCORE']].astype(str).apply(lambda x: ''.join(x), axis=1)
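# regex map from the two-digit R+F score to a named customer segment (e.g. '55' -> champions, '11' -> hibernating)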
seg_map = {'[1-2][1-2]': 'hibernating', '[1-2][3-4]': 'at_risk', '[1-2]5': 'cant_loose', '3[1-2]': 'about_to_sleep', '33': 'need_attention', '[3-4][4-5]': 'loyal_customers', '41': 'promising', '51': 'new_customers', '[4-5][2-3]': 'potential_loyalists', '5[4-5]': 'champions'}
rfm_df['SEGMENT'] = rfm_df['RF_SCORE'].replace(seg_map, regex=True)
kmeans_df = df.groupby('master_id').agg({'last_order_date': lambda date: (today_date - date.max()).days, 'total_order_num': lambda num: num.sum(), 'total_value': lambda value: value.sum()})
kmeans_df.columns = ['recency', 'frequency', 'monetary']
kmeans_df.groupby('cluster').agg({'recency': ['count', 'mean', 'min', 'max'], 'frequency': ['mean', 'min', 'max'], 'monetary': ['mean', 'min', 'max']})
kmeans_df.groupby(['cluster', 'segment']).agg({'recency': ['count', 'mean', 'max', 'min'], 'frequency': ['mean', 'max', 'min'], 'monetary': ['mean', 'max', 'min']}) | code |
104127568/cell_9 | [
"text_html_output_1.png"
] | from datetime import timedelta
import pandas as pd
df = pd.read_csv('../input/flo-data2/flo_data_20k.csv')
date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
today_date = df['last_order_date'].max() + timedelta(days=2)
rfm_df = df.groupby('master_id').agg({'last_order_date': lambda date: (today_date - date.max()).days, 'total_order_num': lambda num: num.sum(), 'total_value': lambda value: value.sum()})
rfm_df.columns = ['recency', 'frequency', 'monetary']
rfm_df['RECENCY_SCORE'] = pd.qcut(rfm_df['recency'], 5, labels=[5, 4, 3, 2, 1])
rfm_df['FREQUENCY_SCORE'] = pd.qcut(rfm_df['frequency'].rank(method='first'), 5, labels=[1, 2, 3, 4, 5])
rfm_df['MONETARY_SCORE'] = pd.qcut(rfm_df['monetary'], 5, labels=[1, 2, 3, 4, 5])
rfm_df['RF_SCORE'] = rfm_df[['RECENCY_SCORE', 'FREQUENCY_SCORE']].astype(str).apply(lambda x: ''.join(x), axis=1)
seg_map = {'[1-2][1-2]': 'hibernating', '[1-2][3-4]': 'at_risk', '[1-2]5': 'cant_loose', '3[1-2]': 'about_to_sleep', '33': 'need_attention', '[3-4][4-5]': 'loyal_customers', '41': 'promising', '51': 'new_customers', '[4-5][2-3]': 'potential_loyalists', '5[4-5]': 'champions'}
rfm_df['SEGMENT'] = rfm_df['RF_SCORE'].replace(seg_map, regex=True)
rfm_df.groupby('SEGMENT').agg({'recency': ['count', 'mean', 'min', 'max'], 'frequency': ['mean', 'min', 'max'], 'monetary': ['mean', 'min', 'max']}) | code |
104127568/cell_11 | [
"text_html_output_1.png"
] | from datetime import timedelta
from plotly.offline import iplot
import pandas as pd
import plotly.graph_objs as go
df = pd.read_csv('../input/flo-data2/flo_data_20k.csv')
date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
today_date = df['last_order_date'].max() + timedelta(days=2)
rfm_df = df.groupby('master_id').agg({'last_order_date': lambda date: (today_date - date.max()).days, 'total_order_num': lambda num: num.sum(), 'total_value': lambda value: value.sum()})
rfm_df.columns = ['recency', 'frequency', 'monetary']
rfm_df['RECENCY_SCORE'] = pd.qcut(rfm_df['recency'], 5, labels=[5, 4, 3, 2, 1])
rfm_df['FREQUENCY_SCORE'] = pd.qcut(rfm_df['frequency'].rank(method='first'), 5, labels=[1, 2, 3, 4, 5])
rfm_df['MONETARY_SCORE'] = pd.qcut(rfm_df['monetary'], 5, labels=[1, 2, 3, 4, 5])
rfm_df['RF_SCORE'] = rfm_df[['RECENCY_SCORE', 'FREQUENCY_SCORE']].astype(str).apply(lambda x: ''.join(x), axis=1)
seg_map = {'[1-2][1-2]': 'hibernating', '[1-2][3-4]': 'at_risk', '[1-2]5': 'cant_loose', '3[1-2]': 'about_to_sleep', '33': 'need_attention', '[3-4][4-5]': 'loyal_customers', '41': 'promising', '51': 'new_customers', '[4-5][2-3]': 'potential_loyalists', '5[4-5]': 'champions'}
rfm_df['SEGMENT'] = rfm_df['RF_SCORE'].replace(seg_map, regex=True)
rfm_df.groupby('SEGMENT').agg({'recency': ['count', 'mean', 'min', 'max'], 'frequency': ['mean', 'min', 'max'], 'monetary': ['mean', 'min', 'max']})
def cluster_visualizer(dataframe, cluster, lim):
trace1 = go.Bar(x=dataframe.groupby(cluster).agg({'recency': 'mean'}).reset_index()[cluster], text=round(dataframe.groupby(cluster).agg({'recency': 'mean'}).reset_index()['recency'], 2), textposition='auto', y=dataframe.groupby(cluster).agg({'recency': 'mean'}).reset_index()['recency'], name='Recency', textfont=dict(size=12), marker=dict(color='#F33F19', opacity=0.65))
trace2 = go.Bar(x=dataframe.groupby(cluster).agg({'frequency': 'mean'}).reset_index()[cluster], text=round(dataframe.groupby(cluster).agg({'frequency': 'mean'}).reset_index()['frequency'], 2), textposition='auto', y=dataframe.groupby(cluster).agg({'frequency': 'mean'}).reset_index()['frequency'], name='Frequency', textfont=dict(size=12), marker=dict(color='#1C19F3', opacity=0.65))
trace3 = go.Bar(x=dataframe.groupby(cluster).agg({'monetary': 'mean'}).reset_index()[cluster], text=round(dataframe.groupby(cluster).agg({'monetary': 'mean'}).reset_index()['monetary'], 2), textposition='auto', y=dataframe.groupby(cluster).agg({'monetary': 'mean'}).reset_index()['monetary'], name='Monetary', textfont=dict(size=12), marker=dict(color='#F3193D', opacity=0.65))
trace = {'trace1': [trace1, 'Average Recency', 'Clusters', 'Recency', lim[0]], 'trace2': [trace2, 'Average Frequency', 'Clusters', 'Frequency', lim[1]], 'trace3': [trace3, 'Average Monetary', 'Clusters', 'Monetary', lim[2]]}
for i in ['trace1', 'trace2', 'trace3']:
layout = go.Layout(title={'text': trace[i][1], 'y': 0.9, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'}, xaxis=dict(title=trace[i][2]), yaxis=dict(title=trace[i][3]), template='plotly_white')
fig = go.Figure(data=[trace[i][0]], layout=layout)
        fig.update_yaxes(range=[0, trace[i][4]], automargin=True)
        iplot(fig)  # render the bar chart for this metric
lim_list = [250, 15, 1500]
cluster_visualizer(rfm_df, 'SEGMENT', lim_list) | code |
104127568/cell_18 | [
"text_html_output_1.png"
] | from datetime import timedelta
from plotly.offline import iplot
import pandas as pd
import plotly.graph_objs as go
df = pd.read_csv('../input/flo-data2/flo_data_20k.csv')
date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
today_date = df['last_order_date'].max() + timedelta(days=2)
rfm_df = df.groupby('master_id').agg({'last_order_date': lambda date: (today_date - date.max()).days, 'total_order_num': lambda num: num.sum(), 'total_value': lambda value: value.sum()})
rfm_df.columns = ['recency', 'frequency', 'monetary']
rfm_df['RECENCY_SCORE'] = pd.qcut(rfm_df['recency'], 5, labels=[5, 4, 3, 2, 1])
rfm_df['FREQUENCY_SCORE'] = pd.qcut(rfm_df['frequency'].rank(method='first'), 5, labels=[1, 2, 3, 4, 5])
rfm_df['MONETARY_SCORE'] = pd.qcut(rfm_df['monetary'], 5, labels=[1, 2, 3, 4, 5])
rfm_df['RF_SCORE'] = rfm_df[['RECENCY_SCORE', 'FREQUENCY_SCORE']].astype(str).apply(lambda x: ''.join(x), axis=1)
seg_map = {'[1-2][1-2]': 'hibernating', '[1-2][3-4]': 'at_risk', '[1-2]5': 'cant_loose', '3[1-2]': 'about_to_sleep', '33': 'need_attention', '[3-4][4-5]': 'loyal_customers', '41': 'promising', '51': 'new_customers', '[4-5][2-3]': 'potential_loyalists', '5[4-5]': 'champions'}
rfm_df['SEGMENT'] = rfm_df['RF_SCORE'].replace(seg_map, regex=True)
rfm_df.groupby('SEGMENT').agg({'recency': ['count', 'mean', 'min', 'max'], 'frequency': ['mean', 'min', 'max'], 'monetary': ['mean', 'min', 'max']})
def cluster_visualizer(dataframe, cluster, lim):
trace1 = go.Bar(x=dataframe.groupby(cluster).agg({'recency': 'mean'}).reset_index()[cluster], text=round(dataframe.groupby(cluster).agg({'recency': 'mean'}).reset_index()['recency'], 2), textposition='auto', y=dataframe.groupby(cluster).agg({'recency': 'mean'}).reset_index()['recency'], name='Recency', textfont=dict(size=12), marker=dict(color='#F33F19', opacity=0.65))
trace2 = go.Bar(x=dataframe.groupby(cluster).agg({'frequency': 'mean'}).reset_index()[cluster], text=round(dataframe.groupby(cluster).agg({'frequency': 'mean'}).reset_index()['frequency'], 2), textposition='auto', y=dataframe.groupby(cluster).agg({'frequency': 'mean'}).reset_index()['frequency'], name='Frequency', textfont=dict(size=12), marker=dict(color='#1C19F3', opacity=0.65))
trace3 = go.Bar(x=dataframe.groupby(cluster).agg({'monetary': 'mean'}).reset_index()[cluster], text=round(dataframe.groupby(cluster).agg({'monetary': 'mean'}).reset_index()['monetary'], 2), textposition='auto', y=dataframe.groupby(cluster).agg({'monetary': 'mean'}).reset_index()['monetary'], name='Monetary', textfont=dict(size=12), marker=dict(color='#F3193D', opacity=0.65))
trace = {'trace1': [trace1, 'Average Recency', 'Clusters', 'Recency', lim[0]], 'trace2': [trace2, 'Average Frequency', 'Clusters', 'Frequency', lim[1]], 'trace3': [trace3, 'Average Monetary', 'Clusters', 'Monetary', lim[2]]}
for i in ['trace1', 'trace2', 'trace3']:
layout = go.Layout(title={'text': trace[i][1], 'y': 0.9, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'}, xaxis=dict(title=trace[i][2]), yaxis=dict(title=trace[i][3]), template='plotly_white')
fig = go.Figure(data=[trace[i][0]], layout=layout)
        fig.update_yaxes(range=[0, trace[i][4]], automargin=True)
        iplot(fig)  # render the bar chart for this metric
lim_list = [250, 15, 1500]
cluster_visualizer(rfm_df, 'SEGMENT', lim_list)
kmeans_df = df.groupby('master_id').agg({'last_order_date': lambda date: (today_date - date.max()).days, 'total_order_num': lambda num: num.sum(), 'total_value': lambda value: value.sum()})
kmeans_df.columns = ['recency', 'frequency', 'monetary']
kmeans_df.groupby('cluster').agg({'recency': ['count', 'mean', 'min', 'max'], 'frequency': ['mean', 'min', 'max'], 'monetary': ['mean', 'min', 'max']})
lim_list = [350, 10, 1000]
cluster_visualizer(kmeans_df, 'cluster', lim_list) | code |
104127568/cell_15 | [
"text_html_output_4.png",
"text_html_output_2.png",
"text_html_output_3.png"
] | from datetime import timedelta
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from yellowbrick.cluster import KElbowVisualizer
import pandas as pd
df = pd.read_csv('../input/flo-data2/flo_data_20k.csv')
date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
today_date = df['last_order_date'].max() + timedelta(days=2)
rfm_df = df.groupby('master_id').agg({'last_order_date': lambda date: (today_date - date.max()).days, 'total_order_num': lambda num: num.sum(), 'total_value': lambda value: value.sum()})
rfm_df.columns = ['recency', 'frequency', 'monetary']
rfm_df['RECENCY_SCORE'] = pd.qcut(rfm_df['recency'], 5, labels=[5, 4, 3, 2, 1])
rfm_df['FREQUENCY_SCORE'] = pd.qcut(rfm_df['frequency'].rank(method='first'), 5, labels=[1, 2, 3, 4, 5])
rfm_df['MONETARY_SCORE'] = pd.qcut(rfm_df['monetary'], 5, labels=[1, 2, 3, 4, 5])
rfm_df['RF_SCORE'] = rfm_df[['RECENCY_SCORE', 'FREQUENCY_SCORE']].astype(str).apply(lambda x: ''.join(x), axis=1)
seg_map = {'[1-2][1-2]': 'hibernating', '[1-2][3-4]': 'at_risk', '[1-2]5': 'cant_loose', '3[1-2]': 'about_to_sleep', '33': 'need_attention', '[3-4][4-5]': 'loyal_customers', '41': 'promising', '51': 'new_customers', '[4-5][2-3]': 'potential_loyalists', '5[4-5]': 'champions'}
rfm_df['SEGMENT'] = rfm_df['RF_SCORE'].replace(seg_map, regex=True)
kmeans_df = df.groupby('master_id').agg({'last_order_date': lambda date: (today_date - date.max()).days, 'total_order_num': lambda num: num.sum(), 'total_value': lambda value: value.sum()})
kmeans_df.columns = ['recency', 'frequency', 'monetary']
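# min-max scale the RFM features, then use the elbow method to choose a cluster count for KMeans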
scaler = MinMaxScaler((0, 1))
kmeans_df2 = scaler.fit_transform(kmeans_df)
kmeans = KMeans()
elbow = KElbowVisualizer(kmeans, k=(2, 20)).fit(kmeans_df2)
elbow.show() | code |
104127568/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from datetime import timedelta
import pandas as pd
df = pd.read_csv('../input/flo-data2/flo_data_20k.csv')
date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
today_date = df['last_order_date'].max() + timedelta(days=2)
rfm_df = df.groupby('master_id').agg({'last_order_date': lambda date: (today_date - date.max()).days, 'total_order_num': lambda num: num.sum(), 'total_value': lambda value: value.sum()})
rfm_df.columns = ['recency', 'frequency', 'monetary']
rfm_df['RECENCY_SCORE'] = pd.qcut(rfm_df['recency'], 5, labels=[5, 4, 3, 2, 1])
rfm_df['FREQUENCY_SCORE'] = pd.qcut(rfm_df['frequency'].rank(method='first'), 5, labels=[1, 2, 3, 4, 5])
rfm_df['MONETARY_SCORE'] = pd.qcut(rfm_df['monetary'], 5, labels=[1, 2, 3, 4, 5])
rfm_df['RF_SCORE'] = rfm_df[['RECENCY_SCORE', 'FREQUENCY_SCORE']].astype(str).apply(lambda x: ''.join(x), axis=1)
seg_map = {'[1-2][1-2]': 'hibernating', '[1-2][3-4]': 'at_risk', '[1-2]5': 'cant_loose', '3[1-2]': 'about_to_sleep', '33': 'need_attention', '[3-4][4-5]': 'loyal_customers', '41': 'promising', '51': 'new_customers', '[4-5][2-3]': 'potential_loyalists', '5[4-5]': 'champions'}
rfm_df['SEGMENT'] = rfm_df['RF_SCORE'].replace(seg_map, regex=True)
kmeans_df = df.groupby('master_id').agg({'last_order_date': lambda date: (today_date - date.max()).days, 'total_order_num': lambda num: num.sum(), 'total_value': lambda value: value.sum()})
kmeans_df.columns = ['recency', 'frequency', 'monetary']
kmeans_df.groupby('cluster').agg({'recency': ['count', 'mean', 'min', 'max'], 'frequency': ['mean', 'min', 'max'], 'monetary': ['mean', 'min', 'max']}) | code |
106207199/cell_4 | [
"text_plain_output_1.png"
] | !pip install pandas | code |
128008954/cell_42 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
matches = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
delivery = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Ball_by_Ball_2008_2022.csv')
matches.City.unique()
matches.columns
matches.Season = matches.Season.replace(to_replace='2007/08', value='2008')
matches.Season = matches.Season.replace(to_replace='2009/10', value='2010')
matches.Season = matches.Season.replace(to_replace='2020/21', value='2020')
matches.City.fillna(matches.Venue, inplace=True)
matches.City.unique() | code |
128008954/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns
df.MatchNumber.value_counts()
df.Venue
df.shape
df.method
df.Margin.sum()
df['WonBy'][df.WonBy == 'Runs'].value_counts()
df['WonBy'][df.WonBy == 'Wickets'].value_counts()
df['WonBy'][df.WonBy == 'Runs'].corr
df.Player_of_Match
df.WonBy.mode()
df.SuperOver.value_counts()
df.Season.value_counts() | code |
128008954/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns
df.MatchNumber.value_counts()
df.Venue
df.shape
df.method | code |
128008954/cell_25 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns
df.MatchNumber.value_counts()
df.Venue
df.shape
df.method
df.Margin.sum()
df['WonBy'][df.WonBy == 'Runs'].value_counts()
df['WonBy'][df.WonBy == 'Wickets'].value_counts()
df['WonBy'][df.WonBy == 'Runs'].corr
df.Player_of_Match
df.WonBy.mode()
df.SuperOver.value_counts()
df.Season.value_counts()
df.TossDecision.value_counts()
df.Date.value_counts() | code |
128008954/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.info() | code |
128008954/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns
df.MatchNumber.value_counts()
df.Venue
df.shape
df.method
df.Margin.sum()
df['WonBy'][df.WonBy == 'Runs'].value_counts()
df['WonBy'][df.WonBy == 'Wickets'].value_counts()
df['WonBy'][df.WonBy == 'Runs'].corr
df.Player_of_Match
df.WonBy.mode()
df.SuperOver.value_counts()
df.Season.value_counts()
df.TossDecision.value_counts() | code |
128008954/cell_30 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
matches = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
delivery = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Ball_by_Ball_2008_2022.csv')
matches | code |
128008954/cell_33 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
matches = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
delivery = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Ball_by_Ball_2008_2022.csv')
delivery = delivery.sort_values(by=['ID'])
delivery.head() | code |
128008954/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns
df.MatchNumber.value_counts() | code |
128008954/cell_29 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
matches = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
delivery = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Ball_by_Ball_2008_2022.csv')
delivery | code |
128008954/cell_39 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
matches = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
delivery = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Ball_by_Ball_2008_2022.csv')
matches.City.unique()
matches.columns | code |
128008954/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns
df.MatchNumber.value_counts()
df.Venue
df.shape
df.method
df.Margin.sum()
df['WonBy'][df.WonBy == 'Runs'].value_counts()
df['WonBy'][df.WonBy == 'Wickets'].value_counts()
df['WonBy'][df.WonBy == 'Runs'].corr
df.Player_of_Match
df.WonBy.mode()
df.SuperOver.value_counts()
df.Season.value_counts()
df.TossDecision.value_counts()
df.Date.value_counts()
df.head(10) | code |
128008954/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df | code |
128008954/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns
df.MatchNumber.value_counts()
df.Venue
df.shape
df.method
df.Margin.sum()
df['WonBy'][df.WonBy == 'Runs'].value_counts()
df['WonBy'][df.WonBy == 'Wickets'].value_counts()
df['WonBy'][df.WonBy == 'Runs'].corr
df.Player_of_Match
df.WonBy.mode()
df.SuperOver.value_counts() | code |
128008954/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128008954/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns
df.MatchNumber.value_counts()
df.Venue | code |
128008954/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns
df.MatchNumber.value_counts()
df.Venue
df.shape
df.method
df.Margin.sum()
df['WonBy'][df.WonBy == 'Runs'].value_counts()
df['WonBy'][df.WonBy == 'Wickets'].value_counts()
df['WonBy'][df.WonBy == 'Runs'].corr
df.Player_of_Match
df.WonBy.mode() | code |
128008954/cell_32 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
matches = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
delivery = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Ball_by_Ball_2008_2022.csv')
matches | code |
128008954/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns
df.MatchNumber.value_counts()
df.Venue
df.shape | code |
128008954/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns
df.MatchNumber.value_counts()
df.Venue
df.shape
df.method
df.Margin.sum()
df['WonBy'][df.WonBy == 'Runs'].value_counts()
df['WonBy'][df.WonBy == 'Wickets'].value_counts()
df['WonBy'][df.WonBy == 'Runs'].corr | code |
128008954/cell_38 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
matches = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
delivery = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Ball_by_Ball_2008_2022.csv')
matches.City.unique() | code |
128008954/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns
df.MatchNumber.value_counts()
df.Venue
df.shape
df.method
df.Margin.sum()
df['WonBy'][df.WonBy == 'Runs'].value_counts()
df['WonBy'][df.WonBy == 'Wickets'].value_counts()
df['WonBy'][df.WonBy == 'Runs'].corr
df.Player_of_Match | code |
128008954/cell_31 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
matches = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
delivery = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Ball_by_Ball_2008_2022.csv')
delivery.head() | code |
128008954/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns
df.MatchNumber.value_counts()
df.Venue
df.shape
df.method
df.Margin.sum()
df['WonBy'][df.WonBy == 'Runs'].value_counts()
df['WonBy'][df.WonBy == 'Wickets'].value_counts() | code |
128008954/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns
df.MatchNumber.value_counts()
df.Venue
df.shape
df.method
df.Margin.sum() | code |
128008954/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns
df.MatchNumber.value_counts()
df.Venue
df.shape
df.method
df.Margin.sum()
df['WonBy'][df.WonBy == 'Runs'].value_counts()
df['WonBy'][df.WonBy == 'Wickets'].value_counts()
df['WonBy'][df.WonBy == 'Runs'].corr
df.Player_of_Match
df.WonBy.mode()
df.SuperOver.value_counts()
df.Season.value_counts()
df.TossDecision.value_counts()
df.Date.value_counts()
df.sort_values(by=['ID']) | code |
128008954/cell_37 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
matches = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
delivery = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Ball_by_Ball_2008_2022.csv')
delivery = delivery.sort_values(by=['ID'])
delivery = pd.get_dummies(delivery, columns=['extra_type'])
matches['my_dates'] = pd.to_datetime(matches['Date'])
matches['day_of_week'] = matches['my_dates'].dt.day_name()
matches['my_dates'].value_counts() | code |
128008954/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns
df.MatchNumber.value_counts()
df.Venue
df.shape
df.method
df.Margin.sum()
df['WonBy'][df.WonBy == 'Runs'].value_counts() | code |
128008954/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
df.columns | code |
128008954/cell_36 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
df
matches = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Matches_2008_2022.csv')
delivery = pd.read_csv('/kaggle/input/ipl-2008-to-2021-all-match-dataset/IPL_Ball_by_Ball_2008_2022.csv')
delivery = delivery.sort_values(by=['ID'])
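# one-hot encode the extra_type column; the generated extra_type_* columns are renamed to shorter names below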
delivery = pd.get_dummies(delivery, columns=['extra_type'])
delivery.rename(columns={'extra_type_byes': 'byes', 'extra_type_legbyes': 'legbyes', 'extra_type_noballs': 'noballs', 'extra_type_wides': 'wides', 'extra_type_penalty': 'penalty'}, inplace=True)
delivery | code |
33107759/cell_4 | [
"text_plain_output_1.png"
] | from cord import ResearchPapers
from cord import ResearchPapers
papers = ResearchPapers.load() | code |
33107759/cell_6 | [
"text_html_output_1.png"
] | from cord import ResearchPapers
from cord import ResearchPapers
papers = ResearchPapers.load()
covid_papers = papers.since_sarscov2()
covid_papers.searchbar('relationships between testing tracing efforts and public health outcomes') | code |
33107759/cell_2 | [
"text_plain_output_1.png"
] | !pip install git+https://github.com/dgunning/cord19.git | code |
128042012/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
train_set = pd.read_csv('/kaggle/input/aviakompaniya/train_dataset.csv')
test_set = pd.read_csv('/kaggle/input/aviakompaniya/test_dataset.csv')
sample = pd.read_csv('/kaggle/input/aviakompaniya/sample_submission.csv')
df = train_set.dropna()
df_100 = df[df['Flight Distance'] > 100]
df_100.select_dtypes('object').columns
Gender = list(df_100.Gender) + list(df_100.Gender)
Customer_Type = list(df_100['Customer Type']) + list(df_100['Customer Type'])
Type_of_Travel = list(df_100['Type of Travel']) + list(df_100['Type of Travel'])
Classes = list(df_100['Class']) + list(df_100['Class'])
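# label-encode the four object columns in place; .codes gives the integer index of each category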
df_100['Gender'] = pd.Categorical(df_100['Gender']).codes
df_100['Customer Type'] = pd.Categorical(df_100['Customer Type']).codes
df_100['Type of Travel'] = pd.Categorical(df_100['Type of Travel']).codes
df_100['Class'] = pd.Categorical(df_100['Class']).codes | code |