path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
122255862/cell_17 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
titanic_class.get_group(3)
titanic_class.sum() | code |
122255862/cell_35 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objs as go
import plotly.offline as offline
import seaborn as sns
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic_gender = titanic['Sex'].value_counts(normalize=True)
wp = {'linewidth': 1, 'edgecolor': 'black'}
plt.axis('equal')
plt.tight_layout()
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
titanic_class.get_group(3)
titanic_class.sum()
titanic_class.mean()
titanic_class['Age'].agg(['min', 'max']).plot(kind='bar')
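# transform() broadcasts each class's mean fare back onto every row, so the new column aligns with the original index.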
titanic['avg_fare_class'] = titanic.groupby('Pclass')['Fare'].transform(lambda x: x.mean())
tita_df = titanic.groupby(['Embarked', 'Sex']).mean()
tita_df
trace = go.Scatter(x=titanic['PassengerId'], y=titanic['Age'], mode='markers', marker=dict(color=titanic['Fare'], colorscale='Portland', showscale=True))
data = [trace]
layout = go.Layout(height=600, width=900, title='Who paid how much?', hovermode='closest')
fig = go.Figure(data=data, layout=layout)
offline.iplot(fig) | code |
122255862/cell_31 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic['avg_fare_class'] = titanic.groupby('Pclass')['Fare'].transform(lambda x: x.mean())
tita_df = titanic.groupby(['Embarked', 'Sex']).mean()
tita_df | code |
122255862/cell_24 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
titanic_class.get_group(3)
titanic_class.sum()
titanic_class.mean()
titanic_class.max()
titanic_class['Fare'].agg(['sum', 'max']) | code |
122255862/cell_14 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1) | code |
122255862/cell_22 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic_gender = titanic['Sex'].value_counts(normalize=True)
wp = {'linewidth': 1, 'edgecolor': 'black'}
plt.axis('equal')
plt.tight_layout()
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
titanic_class.get_group(3)
titanic_class.sum()
titanic_class.mean()
titanic_class['Age'].agg(['min', 'max']).plot(kind='bar')
plt.xlabel('Pclass')
plt.ylabel('Age')
plt.title('Minimum, maximum age for each class') | code |
122255862/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic_gender = titanic['Sex'].value_counts(normalize=True)
wp = {'linewidth': 1, 'edgecolor': 'black'}
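# value_counts(normalize=True) returns proportions sorted by frequency, so the hard-coded labels below need to match that order.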
plt.pie(titanic_gender, labels=['Female', 'Male'], colors=['pink', 'yellow'], autopct='%0.1f%%', explode=[0, 0.05], wedgeprops=wp)
plt.title('Pie chart of gender on the Titanic')
plt.legend(loc='upper right')
plt.axis('equal')
plt.tight_layout() | code |
122255862/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
titanic_class.get_group(3)
titanic_class.sum()
titanic_class.mean()
titanic_class.max()
titanic_class.filter(lambda x: x['Age'].mean() < 38)
titanic_class.filter(lambda x: x['Age'].mean() < 38)['Fare'].mean()
print('Max fare among classes whose mean age is under 38:')
titanic_class.filter(lambda x: x['Age'].mean() < 38)['Fare'].max() | code |
122255862/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class | code |
122255862/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
print(f'The table above contains: \nrows: {titanic.shape[0]} \ncolumns: {titanic.shape[1]}') | code |
73090244/cell_13 | [
"text_html_output_1.png"
] | | code |
73090244/cell_4 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import cudf
PATH = '/kaggle/input/optiver-realized-volatility-prediction'
def load_data(mode, path='/kaggle/input/optiver-realized-volatility-prediction'):
file_name = f'{path}/{mode}.csv'
return cudf.read_csv(file_name)
dev_df = load_data('train', path=PATH)
SCALE = 100
dev_df['target'] = SCALE * dev_df['target']
stock_ids = dev_df['stock_id'].unique()
len(stock_ids) | code |
73090244/cell_6 | [
"text_plain_output_1.png"
] | import cudf
import glob
PATH = '/kaggle/input/optiver-realized-volatility-prediction'
def load_data(mode, path='/kaggle/input/optiver-realized-volatility-prediction'):
file_name = f'{path}/{mode}.csv'
return cudf.read_csv(file_name)
dev_df = load_data('train', path=PATH)
order_book_training = glob.glob(f'{PATH}/book_train.parquet/*/*')
order_book_test = glob.glob(f'{PATH}/book_test.parquet/*/*')
(len(order_book_training), len(order_book_test))
trades_training = glob.glob(f'{PATH}/trade_train.parquet/*/*')
trades_test = glob.glob(f'{PATH}/trade_test.parquet/*/*')
(len(trades_training), len(trades_test)) | code |
73090244/cell_2 | [
"text_plain_output_1.png"
] | import cupy as cp
import cudf
import cuml
import glob
from tqdm import tqdm
import lightgbm as lgb
import numpy as np
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt | code |
73090244/cell_8 | [
"text_plain_output_1.png"
] | | code |
73090244/cell_16 | [
"text_plain_output_1.png"
] | from tqdm import tqdm
import cu_utils.transform as cutran
import cudf
import cupy as cp
import glob
PATH = '/kaggle/input/optiver-realized-volatility-prediction'
def load_data(mode, path='/kaggle/input/optiver-realized-volatility-prediction'):
file_name = f'{path}/{mode}.csv'
return cudf.read_csv(file_name)
dev_df = load_data('train', path=PATH)
SCALE = 100
dev_df['target'] = SCALE * dev_df['target']
stock_ids = dev_df['stock_id'].unique()
len(stock_ids)
order_book_training = glob.glob(f'{PATH}/book_train.parquet/*/*')
order_book_test = glob.glob(f'{PATH}/book_test.parquet/*/*')
(len(order_book_training), len(order_book_test))
trades_training = glob.glob(f'{PATH}/trade_train.parquet/*/*')
trades_test = glob.glob(f'{PATH}/trade_test.parquet/*/*')
(len(trades_training), len(trades_test))
import cu_utils.transform as cutran
def log_diff(df, in_col, null_val):
df['logx'] = df[in_col].log()
df['logx_shifted'] = df[['time_id', 'logx']].groupby('time_id', method='cudf').apply_grouped(cutran.get_cu_shift_transform(shift_by=1, null_val=null_val), incols={'logx': 'x'}, outcols=dict(y_out=cp.float32), tpb=32)['y_out']
df['keep_row'] = df[f'logx_shifted'] != null_val
return df['logx'] - df['logx_shifted']
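# Realized volatility: square root of the sum of squared log returns over the window.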
def realized_vol(log_return):
return cp.sqrt((log_return * log_return).sum())
def extract_raw_book_features(df, null_val=-9999):
df['wap1'] = (df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1']) / (df['bid_size1'] + df['ask_size1'])
df['wap2'] = (df['bid_price2'] * df['ask_size2'] + df['ask_price2'] * df['bid_size2']) / (df['bid_size2'] + df['ask_size2'])
df['wap3'] = (df['bid_price1'] * df['bid_size1'] + df['ask_price1'] * df['ask_size1']) / (df['bid_size1'] + df['ask_size1'])
df['wap4'] = (df['bid_price2'] * df['bid_size2'] + df['ask_price2'] * df['ask_size2']) / (df['bid_size2'] + df['ask_size2'])
for n in [1, 2, 3, 4]:
df[f'log_return{n}'] = log_diff(df, in_col=f'wap{n}', null_val=null_val)
df[f'realized_vol{n}'] = df[f'log_return{n}'] ** 2
df['wap_balance'] = abs(df['wap1'] - df['wap2'])
df['price_spread'] = (df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1']) / 2)
df['price_spread1'] = (df['ask_price2'] - df['bid_price2']) / ((df['ask_price2'] + df['bid_price2']) / 2)
df['bid_spread'] = df['bid_price1'] - df['bid_price2']
df['ask_spread'] = df['ask_price1'] - df['ask_price2']
df['bid_ask_spread'] = abs(df['bid_spread'] - df['ask_spread'])
df['total_volume'] = df['ask_size1'] + df['ask_size2'] + (df['bid_size1'] + df['bid_size2'])
df['volume_imbalance'] = abs(df['ask_size1'] + df['ask_size2'] - (df['bid_size1'] + df['bid_size2']))
df = df[df['keep_row']]
return df
def extract_raw_trade_features(df, null_val=-9999):
df['realized_vol_trade'] = log_diff(df, in_col=f'price', null_val=null_val) ** 2
df['amount'] = df['price'] * df['size']
df = df[df['keep_row']]
return df
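# Aggregate per time_id, flatten the MultiIndex columns produced by .agg() into names like 'wap1_sum', and take the square root of the summed squared log returns so they become realized volatilities.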
def agg(df, feature_dict):
agg_df = df.groupby('time_id').agg(feature_dict).reset_index()
def f(x):
if x[1] == '':
return x[0]
return x[0] + '_' + x[1]
agg_df.columns = [f(x) for x in agg_df.columns]
col_vol = [col for col in agg_df.columns if 'realized_vol' in col and ('mean' in col or 'sum' in col)]
agg_df[col_vol] = agg_df[col_vol].sqrt()
return agg_df
def extract_book_stats(df):
feature_dict = {'wap1': ['sum', 'std'], 'wap2': ['sum', 'std'], 'wap3': ['sum', 'std'], 'wap4': ['sum', 'std'], 'realized_vol1': ['sum'], 'realized_vol2': ['sum'], 'realized_vol3': ['sum'], 'realized_vol4': ['sum'], 'price_spread': ['sum', 'max'], 'price_spread1': ['sum', 'max'], 'wap_balance': ['sum', 'max'], 'bid_spread': ['sum', 'max'], 'ask_spread': ['sum', 'max'], 'total_volume': ['sum', 'max'], 'volume_imbalance': ['sum', 'max'], 'bid_ask_spread': ['sum', 'max']}
return agg(df, feature_dict)
def extract_book_stats_time(df):
feature_dict = {'realized_vol1': ['sum'], 'realized_vol2': ['sum'], 'realized_vol3': ['sum'], 'realized_vol4': ['sum']}
return agg(df, feature_dict)
def extract_trade_stats(df):
feature_dict = {'realized_vol_trade': ['sum'], 'seconds_in_bucket': ['count'], 'size': ['sum', 'max', 'min'], 'order_count': ['sum', 'max'], 'amount': ['sum', 'max', 'min']}
return agg(df, feature_dict)
def extract_trade_stats_time(df):
feature_dict = {'realized_vol_trade': ['sum'], 'seconds_in_bucket': ['count'], 'size': ['sum'], 'amount': ['sum'], 'order_count': ['sum']}
return agg(df, feature_dict)
def time_constraint_fe(df, stats_df, last_sec, fe_function, cols):
sub_df = df[df['seconds_in_bucket'] >= last_sec].reset_index(drop=True)
if sub_df.shape[0] > 0:
sub_stats = fe_function(sub_df)
else:
sub_stats = cudf.DataFrame(columns=cols)
return stats_df.merge(sub_stats, on='time_id', how='left', suffixes=('', f'_{last_sec}'))
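# For one stock: build raw book and trade features, aggregate them per time_id, then recompute the volatility aggregates restricted to the later part of each bucket (seconds_in_bucket >= 100..500) and merge everything on time_id.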
def feature_engineering(book_path, trade_path):
book_df = cudf.read_parquet(book_path)
book_df = extract_raw_book_features(book_df)
book_stats = extract_book_stats(book_df)
book_cols_time = ['realized_vol1_sum', 'realized_vol2_sum', 'realized_vol3_sum', 'realized_vol4_sum'] + ['time_id']
trade_df = cudf.read_parquet(trade_path)
trade_df = extract_raw_trade_features(trade_df)
trade_stats = extract_trade_stats(trade_df)
trade_cols_time = ['realized_vol_trade_sum', 'seconds_in_bucket_count', 'size_sum', 'order_count_sum', 'amount_sum'] + ['time_id']
for last_sec in [100, 200, 300, 400, 500]:
book_stats = time_constraint_fe(book_df, book_stats, last_sec, extract_book_stats_time, book_cols_time)
trade_stats = time_constraint_fe(trade_df, trade_stats, last_sec, extract_trade_stats_time, trade_cols_time)
return book_stats.merge(trade_stats, on='time_id', how='left')
def process_data(order_book_paths, trade_paths, stock_ids):
stock_dfs = []
for book_path, trade_path in tqdm(list(zip(order_book_paths, trade_paths))):
stock_id = int(book_path.split('=')[1].split('/')[0])
df = feature_engineering(book_path, trade_path)
df['stock_id'] = stock_id
stock_dfs.append(df)
return cudf.concat(stock_dfs)
train = process_data(order_book_training, trades_training, stock_ids)
test = process_data(order_book_test, trades_test, stock_ids)
(train.shape, test.shape)
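# Add per-stock_id and per-time_id aggregates (mean/max/std/min) of the volatility features, computing the statistics on training rows only and merging them back onto all rows.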
def stock_time_fe(df):
cols = ['realized_vol1_sum', 'realized_vol2_sum', 'realized_vol1_sum_200', 'realized_vol2_sum_200', 'realized_vol1_sum_300', 'realized_vol2_sum_300', 'realized_vol1_sum_400', 'realized_vol2_sum_400', 'realized_vol_trade_sum_200', 'realized_vol_trade_sum_300', 'realized_vol_trade_sum_400', 'realized_vol_trade_sum']
tmp_df = df[~df['is_test']]
for agg_col in ['stock_id', 'time_id']:
for agg_func in ['mean', 'max', 'std', 'min']:
agg_df = tmp_df.groupby(agg_col)[cols].agg(agg_func)
agg_df.columns = [f'{agg_col}_{agg_func}_{col}' for col in agg_df.columns]
df = df.merge(agg_df.reset_index(), on=agg_col, how='left')
return df
train['is_test'] = False
test['is_test'] = True
all_df = train.append(test).reset_index(drop=True)
all_df = stock_time_fe(all_df)
train = all_df[~all_df['is_test']]
test = all_df[all_df['is_test']].to_pandas()
train = dev_df.merge(train, on=['stock_id', 'time_id'], how='left').to_pandas()
num_features = [col for col in list(train.columns) if col not in {'stock_id', 'time_id', 'target', 'is_test'}]
len(num_features) | code |
73090244/cell_3 | [
"text_plain_output_1.png"
] | import cudf
PATH = '/kaggle/input/optiver-realized-volatility-prediction'
def load_data(mode, path='/kaggle/input/optiver-realized-volatility-prediction'):
file_name = f'{path}/{mode}.csv'
return cudf.read_csv(file_name)
dev_df = load_data('train', path=PATH)
dev_df.head() | code |
73090244/cell_5 | [
"text_plain_output_1.png"
] | import cudf
import glob
PATH = '/kaggle/input/optiver-realized-volatility-prediction'
def load_data(mode, path='/kaggle/input/optiver-realized-volatility-prediction'):
file_name = f'{path}/{mode}.csv'
return cudf.read_csv(file_name)
dev_df = load_data('train', path=PATH)
order_book_training = glob.glob(f'{PATH}/book_train.parquet/*/*')
order_book_test = glob.glob(f'{PATH}/book_test.parquet/*/*')
(len(order_book_training), len(order_book_test)) | code |
73099200/cell_21 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges'])
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
data['MonthlyCharges'] = l.fit_transform(data['MonthlyCharges'])
y = data.Churn
x = data.drop(columns=['customerID', 'Churn', 'TotalCharges'])
data.MultipleLines.unique()
data.InternetService.unique() | code |
73099200/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges'])
nom_data = pd.get_dummies(data[['customerID', 'gender', 'MonthlyCharges', 'TotalCharges']], drop_first=True)
nom_data.head() | code |
73099200/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int') | code |
73099200/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns | code |
73099200/cell_34 | [
"text_plain_output_1.png"
] | from sklearn.compose import make_column_transformer
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import make_pipeline
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges'])
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
data['MonthlyCharges'] = l.fit_transform(data['MonthlyCharges'])
y = data.Churn
le = LabelEncoder()
y = le.fit_transform(y)
y
x = data.drop(columns=['customerID', 'Churn', 'TotalCharges'])
x.columns
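# One-hot encode the nominal columns, ordinal-encode the mostly yes/no service columns, and pass the remaining numeric columns through unchanged.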
nom_cols = ['gender', 'InternetService', 'Contract', 'PaymentMethod']
ord_cols = ['Partner', 'Dependents', 'PhoneService', 'MultipleLines', 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies', 'PaperlessBilling']
trans_cols = make_column_transformer((OneHotEncoder(), nom_cols), (OrdinalEncoder(), ord_cols), remainder='passthrough')
trans_cols.fit_transform(x)
from sklearn.linear_model import LinearRegression
le = LinearRegression()
from sklearn.pipeline import make_pipeline
pipe = make_pipeline(trans_cols, le)
pipe.fit(x_train, y_train)
pred = pipe.predict(x_test)
from sklearn.metrics import mean_squared_error
mean_squared_error(pred, y_test)
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=5)
from sklearn.pipeline import make_pipeline
pipe = make_pipeline(trans_cols, model)
pipe.fit(x_train, y_train) | code |
73099200/cell_23 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges'])
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
data['MonthlyCharges'] = l.fit_transform(data['MonthlyCharges'])
y = data.Churn
x = data.drop(columns=['customerID', 'Churn', 'TotalCharges'])
data.MultipleLines.unique()
data.InternetService.unique()
data.Contract.unique()
data.Partner.unique() | code |
73099200/cell_30 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import KFold
from sklearn.model_selection import KFold
kf = KFold(n_splits=3)
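# Each iteration yields a (train_indices, test_indices) pair; with 9 samples and 3 folds, every test fold holds 3 indices.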
for i in kf.split([0, 1, 2, 3, 4, 5, 6, 7, 8]):
print(i) | code |
73099200/cell_33 | [
"text_plain_output_1.png"
] | from sklearn.compose import make_column_transformer
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges'])
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
data['MonthlyCharges'] = l.fit_transform(data['MonthlyCharges'])
y = data.Churn
le = LabelEncoder()
y = le.fit_transform(y)
y
x = data.drop(columns=['customerID', 'Churn', 'TotalCharges'])
x.columns
nom_cols = ['gender', 'InternetService', 'Contract', 'PaymentMethod']
ord_cols = ['Partner', 'Dependents', 'PhoneService', 'MultipleLines', 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies', 'PaperlessBilling']
trans_cols = make_column_transformer((OneHotEncoder(), nom_cols), (OrdinalEncoder(), ord_cols), remainder='passthrough')
trans_cols.fit_transform(x)
from sklearn.linear_model import LinearRegression
le = LinearRegression()
from sklearn.pipeline import make_pipeline
pipe = make_pipeline(trans_cols, le)
pipe.fit(x_train, y_train)
pred = pipe.predict(x_test)
from sklearn.metrics import mean_squared_error
mean_squared_error(pred, y_test) | code |
73099200/cell_20 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges'])
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
data['MonthlyCharges'] = l.fit_transform(data['MonthlyCharges'])
y = data.Churn
x = data.drop(columns=['customerID', 'Churn', 'TotalCharges'])
data.MultipleLines.unique() | code |
73099200/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape | code |
73099200/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.compose import make_column_transformer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges'])
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
data['MonthlyCharges'] = l.fit_transform(data['MonthlyCharges'])
y = data.Churn
x = data.drop(columns=['customerID', 'Churn', 'TotalCharges'])
x.columns
nom_cols = ['gender', 'InternetService', 'Contract', 'PaymentMethod']
ord_cols = ['Partner', 'Dependents', 'PhoneService', 'MultipleLines', 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies', 'PaperlessBilling']
trans_cols = make_column_transformer((OneHotEncoder(), nom_cols), (OrdinalEncoder(), ord_cols), remainder='passthrough')
trans_cols.fit_transform(x) | code |
73099200/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.head() | code |
73099200/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner']) | code |
73099200/cell_19 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges'])
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
data['MonthlyCharges'] = l.fit_transform(data['MonthlyCharges'])
y = data.Churn
x = data.drop(columns=['customerID', 'Churn', 'TotalCharges'])
x.columns | code |
73099200/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73099200/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum() | code |
73099200/cell_18 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges'])
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
data['MonthlyCharges'] = l.fit_transform(data['MonthlyCharges'])
y = data.Churn
x = data.drop(columns=['customerID', 'Churn', 'TotalCharges'])
x.head() | code |
73099200/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.compose import make_column_transformer
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges'])
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
data['MonthlyCharges'] = l.fit_transform(data['MonthlyCharges'])
y = data.Churn
le = LabelEncoder()
y = le.fit_transform(y)
y
x = data.drop(columns=['customerID', 'Churn', 'TotalCharges'])
x.columns
nom_cols = ['gender', 'InternetService', 'Contract', 'PaymentMethod']
ord_cols = ['Partner', 'Dependents', 'PhoneService', 'MultipleLines', 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies', 'PaperlessBilling']
trans_cols = make_column_transformer((OneHotEncoder(), nom_cols), (OrdinalEncoder(), ord_cols), remainder='passthrough')
trans_cols.fit_transform(x)
from sklearn.linear_model import LinearRegression
le = LinearRegression()
from sklearn.pipeline import make_pipeline
pipe = make_pipeline(trans_cols, le)
pipe.fit(x_train, y_train) | code |
73099200/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean()) | code |
73099200/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges'])
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
data['MonthlyCharges'] = l.fit_transform(data['MonthlyCharges'])
y = data.Churn
y.head() | code |
73099200/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data | code |
73099200/cell_17 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges'])
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
data['MonthlyCharges'] = l.fit_transform(data['MonthlyCharges'])
y = data.Churn
le = LabelEncoder()
y = le.fit_transform(y)
y | code |
73099200/cell_31 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges'])
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
data['MonthlyCharges'] = l.fit_transform(data['MonthlyCharges'])
y = data.Churn
le = LabelEncoder()
y = le.fit_transform(y)
y
x = data.drop(columns=['customerID', 'Churn', 'TotalCharges'])
x.columns
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
x.head() | code |
73099200/cell_14 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges'])
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
data['MonthlyCharges'] = l.fit_transform(data['MonthlyCharges'])
data.head() | code |
73099200/cell_22 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges'])
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
data['MonthlyCharges'] = l.fit_transform(data['MonthlyCharges'])
y = data.Churn
x = data.drop(columns=['customerID', 'Churn', 'TotalCharges'])
data.MultipleLines.unique()
data.InternetService.unique()
data.Contract.unique() | code |
73099200/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
num_data.head() | code |
73099200/cell_27 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges'])
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
data['MonthlyCharges'] = l.fit_transform(data['MonthlyCharges'])
y = data.Churn
le = LabelEncoder()
y = le.fit_transform(y)
y
x = data.drop(columns=['customerID', 'Churn', 'TotalCharges'])
x.columns
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
x.head() | code |
73099200/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull()
data.shape
data.isnull().sum()
data.fillna(data.mean())
data.select_dtypes(include='int')
num_data = data.select_dtypes(include=['int', 'float'])
data.drop(columns=['Partner'])
data.drop(columns=['gender', 'tenure', 'TotalCharges']) | code |
73099200/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv')
data.columns
data.isnull() | code |
329077/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import math
import pandas as pd
names_data = pd.read_csv('../input/NationalNames.csv')
frequent_names = names_data[names_data['Count'] > 10]
indexed_names = frequent_names.set_index(['Year', 'Name'])['Count']
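# Ambiguity is 0 when a (Year, Name) pair is recorded for only one sex and approaches 1 when its counts are split evenly between the sexes.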
def ambiguity_measure(grouped_frame):
return 2 * (1 - grouped_frame.max() / grouped_frame.sum())
ambiguity_data = ambiguity_measure(indexed_names.groupby(level=['Year', 'Name'])).rename('Ambiguity')
yearly_ambiguity = ambiguity_data.groupby(level='Year')
ambiguity_with_counts = ambiguity_data.to_frame().join(indexed_names.groupby(level=['Year', 'Name']).sum())
data_vs_years = ambiguity_with_counts.unstack(level='Year')
data_vs_years['Total'] = data_vs_years['Count'].sum(axis=1)
yearly_ambiguity.idxmax().apply(lambda x: x[1]).to_frame() | code |
329077/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import math
import pandas as pd
names_data = pd.read_csv('../input/NationalNames.csv')
frequent_names = names_data[names_data['Count'] > 10]
indexed_names = frequent_names.set_index(['Year', 'Name'])['Count']
def ambiguity_measure(grouped_frame):
return 2 * (1 - grouped_frame.max() / grouped_frame.sum())
ambiguity_data = ambiguity_measure(indexed_names.groupby(level=['Year', 'Name'])).rename('Ambiguity')
yearly_ambiguity = ambiguity_data.groupby(level='Year')
ambiguity_with_counts = ambiguity_data.to_frame().join(indexed_names.groupby(level=['Year', 'Name']).sum())
data_vs_years = ambiguity_with_counts.unstack(level='Year')
data_vs_years['Total'] = data_vs_years['Count'].sum(axis=1)
ambiguous_names = data_vs_years[(data_vs_years['Ambiguity'] > 0.1).any(axis=1)]
popular_ambiguous_names = ambiguous_names.sort_values(by='Total', ascending=False).head(7).drop('Total', axis=1)
popular_ambiguous_names['Ambiguity'].transpose().plot(figsize=(10, 10)) | code |
329077/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import math
import pandas as pd
names_data = pd.read_csv('../input/NationalNames.csv')
frequent_names = names_data[names_data['Count'] > 10]
indexed_names = frequent_names.set_index(['Year', 'Name'])['Count']
def ambiguity_measure(grouped_frame):
return 2 * (1 - grouped_frame.max() / grouped_frame.sum())
ambiguity_data = ambiguity_measure(indexed_names.groupby(level=['Year', 'Name'])).rename('Ambiguity')
yearly_ambiguity = ambiguity_data.groupby(level='Year')
ambiguity_with_counts = ambiguity_data.to_frame().join(indexed_names.groupby(level=['Year', 'Name']).sum())
data_vs_years = ambiguity_with_counts.unstack(level='Year')
data_vs_years['Total'] = data_vs_years['Count'].sum(axis=1)
yearly_ambiguity.idxmax().apply(lambda x: x[1]).to_frame()
yearly_ambiguity.mean().transpose().plot(figsize=(10, 10)) | code |
329077/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import math
import pandas as pd
names_data = pd.read_csv('../input/NationalNames.csv')
frequent_names = names_data[names_data['Count'] > 10]
indexed_names = frequent_names.set_index(['Year', 'Name'])['Count']
def ambiguity_measure(grouped_frame):
return 2 * (1 - grouped_frame.max() / grouped_frame.sum())
ambiguity_data = ambiguity_measure(indexed_names.groupby(level=['Year', 'Name'])).rename('Ambiguity')
yearly_ambiguity = ambiguity_data.groupby(level='Year')
ambiguity_with_counts = ambiguity_data.to_frame().join(indexed_names.groupby(level=['Year', 'Name']).sum())
data_vs_years = ambiguity_with_counts.unstack(level='Year')
data_vs_years['Total'] = data_vs_years['Count'].sum(axis=1)
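# Weight each name's ambiguity by its share of that year's total births, then sum across names to get a population-weighted ambiguity per year.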
total_people_per_year = ambiguity_with_counts['Count'].groupby(level='Year').sum()
ambiguity_by_year = ambiguity_with_counts.unstack('Name')
ambiguity_by_year['total_people'] = total_people_per_year
weighted_ambiguity = ambiguity_by_year.apply(lambda x: x['Ambiguity'] * (x['Count'] / x['total_people'][0]), axis=1)
weighted_ambiguity.sum(axis=1).plot(figsize=(10, 10)) | code |
106198852/cell_4 | [
"text_plain_output_1.png"
] | !pip install transformers
from transformers import BertForQuestionAnswering, AutoTokenizer
modelname = 'deepset/bert-base-cased-squad2'
model = BertForQuestionAnswering.from_pretrained(modelname)
tokenizer = AutoTokenizer.from_pretrained(modelname) | code |
106198852/cell_7 | [
"text_plain_output_1.png"
] | from transformers import pipeline
context = 'The Intergovernmental Panel on Climate Change (IPCC) is a scientific intergovernmental body under the auspices of the United Nations, set up at the request of member governments. It was first established in 1988 by two United Nations organizations, the World Meteorological Organization (WMO) and the United Nations Environment Programme (UNEP), and later endorsed by the United Nations General Assembly through Resolution 43/53. Membership of the IPCC is open to all members of the WMO and UNEP. The IPCC produces reports that support the United Nations Framework Convention on Climate Change (UNFCCC), which is the main international treaty on climate change. The ultimate objective of the UNFCCC is to stabilize greenhouse gas concentrations in the atmosphere at a level that would prevent dangerous anthropogenic (human-induced) interference with the climate system. IPCC reports cover the scientific, technical and socio-economic information relevant to understanding the scientific basis of risk of human-induced climate change, its potential impacts and options for adaptation and mitigation.'
questions = ['What organization is the IPCC a part of?', 'What UN organizations established the IPCC?', 'What does the UN want to stabilize?']
tokenizer.encode(questions[0], truncation=True, padding=True)
from transformers import pipeline
nlp = pipeline('question-answering', model=model, tokenizer=tokenizer)
nlp({'question': 'What organization is the IPCC a part of?', 'context': context}) | code |
106198852/cell_8 | [
"text_plain_output_1.png"
] | from transformers import pipeline
context = 'The Intergovernmental Panel on Climate Change (IPCC) is a scientific intergovernmental body under the auspices of the United Nations, set up at the request of member governments. It was first established in 1988 by two United Nations organizations, the World Meteorological Organization (WMO) and the United Nations Environment Programme (UNEP), and later endorsed by the United Nations General Assembly through Resolution 43/53. Membership of the IPCC is open to all members of the WMO and UNEP. The IPCC produces reports that support the United Nations Framework Convention on Climate Change (UNFCCC), which is the main international treaty on climate change. The ultimate objective of the UNFCCC is to stabilize greenhouse gas concentrations in the atmosphere at a level that would prevent dangerous anthropogenic (human-induced) interference with the climate system. IPCC reports cover the scientific, technical and socio-economic information relevant to understanding the scientific basis of risk of human-induced climate change, its potential impacts and options for adaptation and mitigation.'
questions = ['What organization is the IPCC a part of?', 'What UN organizations established the IPCC?', 'What does the UN want to stabilize?']
tokenizer.encode(questions[0], truncation=True, padding=True)
from transformers import pipeline
nlp = pipeline('question-answering', model=model, tokenizer=tokenizer)
nlp({'question': 'What UN organizations established the IPCC?', 'context': context}) | code |
106198852/cell_5 | [
"text_plain_output_1.png"
questions = ['What organization is the IPCC a part of?', 'What UN organizations established the IPCC?', 'What does the UN want to stabilize?']
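# encode() returns the token IDs for the question, including the [CLS] and [SEP] special tokens added by the BERT tokenizer.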
tokenizer.encode(questions[0], truncation=True, padding=True) | code |
130024391/cell_21 | [
"text_plain_output_1.png"
] | import glob
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data_directory = '/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/'
defog_meta = pd.read_csv(data_directory + 'defog_metadata.csv')
tdcsfog_meta = pd.read_csv(data_directory + 'tdcsfog_metadata.csv')
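# Load every per-series CSV for the given source (defog or tdcsfog), tag each row with its series id, and join the visit/medication/subject/test fields from the matching metadata table.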
def prepare_fog_table(df_type):
full_data = pd.DataFrame()
subdatas = glob.glob(data_directory + f'train/{df_type}/*')
for subdata in subdatas:
sub_data = pd.read_csv(subdata)
sub_data['id'] = subdata.split(sep='/')[-1].split(sep='.')[0]
if df_type == 'defog':
sub_data['visit'] = defog_meta.loc[defog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Visit'].to_list()[0]
sub_data['medication'] = defog_meta.loc[defog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Medication'].to_list()[0]
sub_data['subject'] = defog_meta.loc[defog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Subject'].to_list()[0]
sub_data['test'] = 0
sub_data['type'] = df_type
full_data = pd.concat([full_data, sub_data]).reset_index(drop=True)
else:
sub_data['visit'] = tdcsfog_meta.loc[tdcsfog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Visit'].to_list()[0]
sub_data['medication'] = tdcsfog_meta.loc[tdcsfog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Medication'].to_list()[0]
sub_data['subject'] = tdcsfog_meta.loc[tdcsfog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Subject'].to_list()[0]
sub_data['test'] = tdcsfog_meta.loc[tdcsfog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Test'].to_list()[0]
sub_data['type'] = df_type
full_data = pd.concat([full_data, sub_data]).reset_index(drop=True)
return full_data
defog_full_table = prepare_fog_table('defog')
defog_full_table
tdcsfog_full_table = prepare_fog_table('tdcsfog')
defog_full_table = defog_full_table.loc[(defog_full_table.Valid == True) & (defog_full_table.Task == True)].drop(['Valid', 'Task'], axis=1).reset_index(drop=True)
mega_data = pd.concat([defog_full_table, tdcsfog_full_table]).reset_index(drop=True)
mega_data = mega_data.replace({'on': 1, 'off': 0})
mega_data.isna().sum()
numeric_columns = ['Time', 'AccV', 'AccML', 'AccAP', 'StartHesitation', 'Turn', 'Walking', 'Visit', 'Test', 'Medication']
plt.figure(figsize=(8, 8))
sns.heatmap(mega_data[numeric_columns].corr(), annot=True)
plt.show() | code |
130024391/cell_13 | [
"text_html_output_1.png"
] | import glob
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_directory = '/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/'
defog_meta = pd.read_csv(data_directory + 'defog_metadata.csv')
tdcsfog_meta = pd.read_csv(data_directory + 'tdcsfog_metadata.csv')
def prepare_fog_table(df_type):
full_data = pd.DataFrame()
subdatas = glob.glob(data_directory + f'train/{df_type}/*')
for subdata in subdatas:
sub_data = pd.read_csv(subdata)
sub_data['id'] = subdata.split(sep='/')[-1].split(sep='.')[0]
if df_type == 'defog':
sub_data['visit'] = defog_meta.loc[defog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Visit'].to_list()[0]
sub_data['medication'] = defog_meta.loc[defog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Medication'].to_list()[0]
sub_data['subject'] = defog_meta.loc[defog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Subject'].to_list()[0]
sub_data['test'] = 0
sub_data['type'] = df_type
full_data = pd.concat([full_data, sub_data]).reset_index(drop=True)
else:
sub_data['visit'] = tdcsfog_meta.loc[tdcsfog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Visit'].to_list()[0]
sub_data['medication'] = tdcsfog_meta.loc[tdcsfog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Medication'].to_list()[0]
sub_data['subject'] = tdcsfog_meta.loc[tdcsfog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Subject'].to_list()[0]
sub_data['test'] = tdcsfog_meta.loc[tdcsfog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Test'].to_list()[0]
sub_data['type'] = df_type
full_data = pd.concat([full_data, sub_data]).reset_index(drop=True)
return full_data
defog_full_table = prepare_fog_table('defog')
defog_full_table | code |
130024391/cell_9 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_directory = '/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/'
defog_meta = pd.read_csv(data_directory + 'defog_metadata.csv')
tdcsfog_meta = pd.read_csv(data_directory + 'tdcsfog_metadata.csv')
# Create a helper that displays the main info about a dataset
def main_info(dataset):
    dataset.info()  # DataFrame.info() prints directly and returns None
    describe = dataset.describe()
    return print(describe, "\n" * 2)
for dataset in [defog_meta, tdcsfog_meta]:
print(f'main information:')
main_info(dataset) | code |
130024391/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_directory = '/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/'
defog_meta = pd.read_csv(data_directory + 'defog_metadata.csv')
tdcsfog_meta = pd.read_csv(data_directory + 'tdcsfog_metadata.csv')
tdcsfog_meta.head() | code |
130024391/cell_19 | [
"text_html_output_1.png"
] | import glob
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_directory = '/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/'
defog_meta = pd.read_csv(data_directory + 'defog_metadata.csv')
tdcsfog_meta = pd.read_csv(data_directory + 'tdcsfog_metadata.csv')
def prepare_fog_table(df_type):
full_data = pd.DataFrame()
subdatas = glob.glob(data_directory + f'train/{df_type}/*')
for subdata in subdatas:
sub_data = pd.read_csv(subdata)
sub_data['id'] = subdata.split(sep='/')[-1].split(sep='.')[0]
if df_type == 'defog':
sub_data['visit'] = defog_meta.loc[defog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Visit'].to_list()[0]
sub_data['medication'] = defog_meta.loc[defog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Medication'].to_list()[0]
sub_data['subject'] = defog_meta.loc[defog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Subject'].to_list()[0]
sub_data['test'] = 0
sub_data['type'] = df_type
full_data = pd.concat([full_data, sub_data]).reset_index(drop=True)
else:
sub_data['visit'] = tdcsfog_meta.loc[tdcsfog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Visit'].to_list()[0]
sub_data['medication'] = tdcsfog_meta.loc[tdcsfog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Medication'].to_list()[0]
sub_data['subject'] = tdcsfog_meta.loc[tdcsfog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Subject'].to_list()[0]
sub_data['test'] = tdcsfog_meta.loc[tdcsfog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Test'].to_list()[0]
sub_data['type'] = df_type
full_data = pd.concat([full_data, sub_data]).reset_index(drop=True)
return full_data
defog_full_table = prepare_fog_table('defog')
defog_full_table
tdcsfog_full_table = prepare_fog_table('tdcsfog')
defog_full_table = defog_full_table.loc[(defog_full_table.Valid == True) & (defog_full_table.Task == True)].drop(['Valid', 'Task'], axis=1).reset_index(drop=True)
mega_data = pd.concat([defog_full_table, tdcsfog_full_table]).reset_index(drop=True)
mega_data = mega_data.replace({'on': 1, 'off': 0})
mega_data.isna().sum() | code |
130024391/cell_1 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
130024391/cell_17 | [
"text_html_output_1.png"
] | import glob
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_directory = '/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/'
defog_meta = pd.read_csv(data_directory + 'defog_metadata.csv')
tdcsfog_meta = pd.read_csv(data_directory + 'tdcsfog_metadata.csv')
def prepare_fog_table(df_type):
full_data = pd.DataFrame()
subdatas = glob.glob(data_directory + f'train/{df_type}/*')
for subdata in subdatas:
sub_data = pd.read_csv(subdata)
sub_data['id'] = subdata.split(sep='/')[-1].split(sep='.')[0]
if df_type == 'defog':
sub_data['visit'] = defog_meta.loc[defog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Visit'].to_list()[0]
sub_data['medication'] = defog_meta.loc[defog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Medication'].to_list()[0]
sub_data['subject'] = defog_meta.loc[defog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Subject'].to_list()[0]
sub_data['test'] = 0
sub_data['type'] = df_type
full_data = pd.concat([full_data, sub_data]).reset_index(drop=True)
else:
sub_data['visit'] = tdcsfog_meta.loc[tdcsfog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Visit'].to_list()[0]
sub_data['medication'] = tdcsfog_meta.loc[tdcsfog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Medication'].to_list()[0]
sub_data['subject'] = tdcsfog_meta.loc[tdcsfog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Subject'].to_list()[0]
sub_data['test'] = tdcsfog_meta.loc[tdcsfog_meta['Id'] == subdata.split(sep='/')[-1].split(sep='.')[0], 'Test'].to_list()[0]
sub_data['type'] = df_type
full_data = pd.concat([full_data, sub_data]).reset_index(drop=True)
return full_data
defog_full_table = prepare_fog_table('defog')
defog_full_table
tdcsfog_full_table = prepare_fog_table('tdcsfog')
defog_full_table = defog_full_table.loc[(defog_full_table.Valid == True) & (defog_full_table.Task == True)].drop(['Valid', 'Task'], axis=1).reset_index(drop=True)
mega_data = pd.concat([defog_full_table, tdcsfog_full_table]).reset_index(drop=True)
mega_data | code |
130024391/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_directory = '/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/'
defog_meta = pd.read_csv(data_directory + 'defog_metadata.csv')
tdcsfog_meta = pd.read_csv(data_directory + 'tdcsfog_metadata.csv')
defog_meta.head() | code |
16124614/cell_4 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import tensorflow as tf
print('Version: {}'.format(tf.VERSION)) | code |
16124614/cell_6 | [
"text_plain_output_1.png"
] | import pathlib
main_path = pathlib.Path('../input/oct2017/OCT2017 ')
train_path = main_path / 'train'
test_path = main_path / 'test'
val_path = main_path / 'val'
train_path | code |
16124614/cell_29 | [
"text_plain_output_1.png"
] | print('Model Accuracy on Test Data: {:.1f}%'.format(test_acc * 100)) | code |
16124614/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pathlib
import random
import tensorflow as tf
AUTOTUNE = tf.data.experimental.AUTOTUNE
main_path = pathlib.Path('../input/oct2017/OCT2017 ')
train_path = main_path / 'train'
test_path = main_path / 'test'
val_path = main_path / 'val'
train_path
import random
train_image_paths = [str(path) for path in list(train_path.glob('*/*.jpeg'))]
random.shuffle(train_image_paths)
test_image_paths = [str(path) for path in list(test_path.glob('*/*.jpeg'))]
val_image_paths = [str(path) for path in list(val_path.glob('*/*.jpeg'))]
label_names = sorted(set((item.name for item in train_path.glob('*') if item.is_dir())))
label_to_index = dict(((name, index) for index, name in enumerate(label_names)))
label_to_index
train_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in train_image_paths]
test_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in test_image_paths]
val_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in val_image_paths]
ex_im = tf.read_file(train_image_paths[0])
ex_im = tf.image.decode_jpeg(ex_im, channels=1)
ex_im = tf.image.resize_images(ex_im, [192, 192])
target_im_size = [192, 192]
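# Decode the grayscale JPEG, pad or crop it to 496x496, resize to 192x192, and scale pixel values to [0, 1].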
def preprocess_image(image):
image = tf.image.decode_jpeg(image, channels=1)
image = tf.image.resize_image_with_crop_or_pad(image, 496, 496)
image = tf.image.resize_images(image, target_im_size)
image /= 255.0
return image
def load_and_preprocess_image(path):
image = tf.read_file(path)
return preprocess_image(image)
train_path_ds = tf.data.Dataset.from_tensor_slices(train_image_paths)
test_path_ds = tf.data.Dataset.from_tensor_slices(test_image_paths)
val_path_ds = tf.data.Dataset.from_tensor_slices(val_image_paths)
train_image_ds = train_path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
test_image_ds = test_path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
val_image_ds = val_path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
train_label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(train_image_labels, tf.int64))
test_label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(test_image_labels, tf.int64))
val_label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(val_image_labels, tf.int64))
train_image_label_ds = tf.data.Dataset.zip((train_image_ds, train_label_ds))
test_image_label_ds = tf.data.Dataset.zip((test_image_ds, test_label_ds))
val_image_label_ds = tf.data.Dataset.zip((val_image_ds, val_label_ds))
print('image shape: ', train_image_label_ds.output_shapes[0])
print('label shape: ', train_image_label_ds.output_shapes[1])
print('types: ', train_image_label_ds.output_types)
print()
print(train_image_label_ds) | code |
16124614/cell_28 | [
"text_plain_output_1.png"
] | from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
import os
import os
import pathlib
import random
import tensorflow as tf
AUTOTUNE = tf.data.experimental.AUTOTUNE
main_path = pathlib.Path('../input/oct2017/OCT2017 ')
train_path = main_path / 'train'
test_path = main_path / 'test'
val_path = main_path / 'val'
train_path
import random
train_image_paths = [str(path) for path in list(train_path.glob('*/*.jpeg'))]
random.shuffle(train_image_paths)
test_image_paths = [str(path) for path in list(test_path.glob('*/*.jpeg'))]
val_image_paths = [str(path) for path in list(val_path.glob('*/*.jpeg'))]
label_names = sorted(set((item.name for item in train_path.glob('*') if item.is_dir())))
label_to_index = dict(((name, index) for index, name in enumerate(label_names)))
label_to_index
train_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in train_image_paths]
test_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in test_image_paths]
val_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in val_image_paths]
ex_im = tf.read_file(train_image_paths[0])
ex_im = tf.image.decode_jpeg(ex_im, channels=1)
ex_im = tf.image.resize_images(ex_im, [192, 192])
target_im_size = [192, 192]
def preprocess_image(image):
image = tf.image.decode_jpeg(image, channels=1)
image = tf.image.resize_image_with_crop_or_pad(image, 496, 496)
image = tf.image.resize_images(image, target_im_size)
image /= 255.0
return image
def load_and_preprocess_image(path):
image = tf.read_file(path)
return preprocess_image(image)
train_path_ds = tf.data.Dataset.from_tensor_slices(train_image_paths)
test_path_ds = tf.data.Dataset.from_tensor_slices(test_image_paths)
val_path_ds = tf.data.Dataset.from_tensor_slices(val_image_paths)
train_image_ds = train_path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
test_image_ds = test_path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
val_image_ds = val_path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
train_label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(train_image_labels, tf.int64))
test_label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(test_image_labels, tf.int64))
val_label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(val_image_labels, tf.int64))
train_image_label_ds = tf.data.Dataset.zip((train_image_ds, train_label_ds))
test_image_label_ds = tf.data.Dataset.zip((test_image_ds, test_label_ds))
val_image_label_ds = tf.data.Dataset.zip((val_image_ds, val_label_ds))
BATCH_SIZE = 64
train_ds = train_image_label_ds.shuffle(buffer_size=400)
train_ds = train_ds.repeat()
train_ds = train_ds.batch(BATCH_SIZE)
train_ds = train_ds.prefetch(buffer_size=AUTOTUNE)
test_ds = test_image_label_ds.shuffle(buffer_size=200)
test_ds = test_ds.repeat()
test_ds = test_ds.batch(BATCH_SIZE)
test_ds = test_ds.prefetch(buffer_size=AUTOTUNE)
val_ds = val_image_label_ds.shuffle(buffer_size=200)
val_ds = val_ds.repeat()
val_ds = val_ds.batch(BATCH_SIZE)
val_ds = val_ds.prefetch(buffer_size=AUTOTUNE)
model = models.Sequential()
model.add(layers.Conv2D(32, (5, 5), padding='valid', activation='relu', input_shape=(*target_im_size, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (5, 5), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (5, 5), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(4, activation='softmax'))
model.summary()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
import os
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt_{epoch}')
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix, save_weights_only=True)
EPOCHS = 1
model.fit(train_ds, epochs=EPOCHS, steps_per_epoch=len(train_image_paths) // BATCH_SIZE, callbacks=[checkpoint_callback])
test_loss, test_acc = model.evaluate(test_ds, steps=len(test_image_paths)) | code |
16124614/cell_8 | [
"text_plain_output_1.png"
] | import pathlib
import random
main_path = pathlib.Path('../input/oct2017/OCT2017 ')
train_path = main_path / 'train'
test_path = main_path / 'test'
val_path = main_path / 'val'
train_path
import random
train_image_paths = [str(path) for path in list(train_path.glob('*/*.jpeg'))]
random.shuffle(train_image_paths)
test_image_paths = [str(path) for path in list(test_path.glob('*/*.jpeg'))]
val_image_paths = [str(path) for path in list(val_path.glob('*/*.jpeg'))]
print('Number of training images:', len(train_image_paths))
print('Number of testing images:', len(test_image_paths))
print('Number of validation images:', len(val_image_paths)) | code |
16124614/cell_24 | [
"text_plain_output_1.png"
] | from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
import pathlib
import random
import tensorflow as tf
AUTOTUNE = tf.data.experimental.AUTOTUNE
main_path = pathlib.Path('../input/oct2017/OCT2017 ')
train_path = main_path / 'train'
test_path = main_path / 'test'
val_path = main_path / 'val'
train_path
import random
train_image_paths = [str(path) for path in list(train_path.glob('*/*.jpeg'))]
random.shuffle(train_image_paths)
test_image_paths = [str(path) for path in list(test_path.glob('*/*.jpeg'))]
val_image_paths = [str(path) for path in list(val_path.glob('*/*.jpeg'))]
ex_im = tf.read_file(train_image_paths[0])
ex_im = tf.image.decode_jpeg(ex_im, channels=1)
ex_im = tf.image.resize_images(ex_im, [192, 192])
target_im_size = [192, 192]
def preprocess_image(image):
image = tf.image.decode_jpeg(image, channels=1)
image = tf.image.resize_image_with_crop_or_pad(image, 496, 496)
image = tf.image.resize_images(image, target_im_size)
image /= 255.0
return image
def load_and_preprocess_image(path):
image = tf.read_file(path)
return preprocess_image(image)
model = models.Sequential()
model.add(layers.Conv2D(32, (5, 5), padding='valid', activation='relu', input_shape=(*target_im_size, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (5, 5), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (5, 5), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(4, activation='softmax'))
model.summary() | code |
16124614/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pathlib
import random
import tensorflow as tf
AUTOTUNE = tf.data.experimental.AUTOTUNE
main_path = pathlib.Path('../input/oct2017/OCT2017 ')
train_path = main_path / 'train'
test_path = main_path / 'test'
val_path = main_path / 'val'
train_path
import random
train_image_paths = [str(path) for path in list(train_path.glob('*/*.jpeg'))]
random.shuffle(train_image_paths)
test_image_paths = [str(path) for path in list(test_path.glob('*/*.jpeg'))]
val_image_paths = [str(path) for path in list(val_path.glob('*/*.jpeg'))]
ex_im = tf.read_file(train_image_paths[0])
ex_im = tf.image.decode_jpeg(ex_im, channels=1)
ex_im = tf.image.resize_images(ex_im, [192, 192])
plt.imshow(ex_im[:, :, 0]) | code |
16124614/cell_10 | [
"text_plain_output_1.png"
] | import pathlib
import random
main_path = pathlib.Path('../input/oct2017/OCT2017 ')
train_path = main_path / 'train'
test_path = main_path / 'test'
val_path = main_path / 'val'
train_path
import random
train_image_paths = [str(path) for path in list(train_path.glob('*/*.jpeg'))]
random.shuffle(train_image_paths)
test_image_paths = [str(path) for path in list(test_path.glob('*/*.jpeg'))]
val_image_paths = [str(path) for path in list(val_path.glob('*/*.jpeg'))]
label_names = sorted(set((item.name for item in train_path.glob('*') if item.is_dir())))
label_to_index = dict(((name, index) for index, name in enumerate(label_names)))
label_to_index | code |
16124614/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
import os
import os
import pathlib
import random
import tensorflow as tf
AUTOTUNE = tf.data.experimental.AUTOTUNE
main_path = pathlib.Path('../input/oct2017/OCT2017 ')
train_path = main_path / 'train'
test_path = main_path / 'test'
val_path = main_path / 'val'
train_path
import random
train_image_paths = [str(path) for path in list(train_path.glob('*/*.jpeg'))]
random.shuffle(train_image_paths)
test_image_paths = [str(path) for path in list(test_path.glob('*/*.jpeg'))]
val_image_paths = [str(path) for path in list(val_path.glob('*/*.jpeg'))]
label_names = sorted(set((item.name for item in train_path.glob('*') if item.is_dir())))
label_to_index = dict(((name, index) for index, name in enumerate(label_names)))
label_to_index
train_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in train_image_paths]
test_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in test_image_paths]
val_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in val_image_paths]
ex_im = tf.read_file(train_image_paths[0])
ex_im = tf.image.decode_jpeg(ex_im, channels=1)
ex_im = tf.image.resize_images(ex_im, [192, 192])
target_im_size = [192, 192]
def preprocess_image(image):
image = tf.image.decode_jpeg(image, channels=1)
image = tf.image.resize_image_with_crop_or_pad(image, 496, 496)
image = tf.image.resize_images(image, target_im_size)
image /= 255.0
return image
def load_and_preprocess_image(path):
image = tf.read_file(path)
return preprocess_image(image)
train_path_ds = tf.data.Dataset.from_tensor_slices(train_image_paths)
test_path_ds = tf.data.Dataset.from_tensor_slices(test_image_paths)
val_path_ds = tf.data.Dataset.from_tensor_slices(val_image_paths)
train_image_ds = train_path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
test_image_ds = test_path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
val_image_ds = val_path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
train_label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(train_image_labels, tf.int64))
test_label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(test_image_labels, tf.int64))
val_label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(val_image_labels, tf.int64))
train_image_label_ds = tf.data.Dataset.zip((train_image_ds, train_label_ds))
test_image_label_ds = tf.data.Dataset.zip((test_image_ds, test_label_ds))
val_image_label_ds = tf.data.Dataset.zip((val_image_ds, val_label_ds))
BATCH_SIZE = 64
train_ds = train_image_label_ds.shuffle(buffer_size=400)
train_ds = train_ds.repeat()
train_ds = train_ds.batch(BATCH_SIZE)
train_ds = train_ds.prefetch(buffer_size=AUTOTUNE)
test_ds = test_image_label_ds.shuffle(buffer_size=200)
test_ds = test_ds.repeat()
test_ds = test_ds.batch(BATCH_SIZE)
test_ds = test_ds.prefetch(buffer_size=AUTOTUNE)
val_ds = val_image_label_ds.shuffle(buffer_size=200)
val_ds = val_ds.repeat()
val_ds = val_ds.batch(BATCH_SIZE)
val_ds = val_ds.prefetch(buffer_size=AUTOTUNE)
model = models.Sequential()
model.add(layers.Conv2D(32, (5, 5), padding='valid', activation='relu', input_shape=(*target_im_size, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (5, 5), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (5, 5), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(4, activation='softmax'))
model.summary()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
import os
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt_{epoch}')
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix, save_weights_only=True)
EPOCHS = 1
model.fit(train_ds, epochs=EPOCHS, steps_per_epoch=len(train_image_paths) // BATCH_SIZE, callbacks=[checkpoint_callback]) | code |
16124614/cell_12 | [
"text_plain_output_1.png"
] | import pathlib
import random
main_path = pathlib.Path('../input/oct2017/OCT2017 ')
train_path = main_path / 'train'
test_path = main_path / 'test'
val_path = main_path / 'val'
train_path
import random
train_image_paths = [str(path) for path in list(train_path.glob('*/*.jpeg'))]
random.shuffle(train_image_paths)
test_image_paths = [str(path) for path in list(test_path.glob('*/*.jpeg'))]
val_image_paths = [str(path) for path in list(val_path.glob('*/*.jpeg'))]
label_names = sorted(set((item.name for item in train_path.glob('*') if item.is_dir())))
label_to_index = dict(((name, index) for index, name in enumerate(label_names)))
label_to_index
train_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in train_image_paths]
test_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in test_image_paths]
val_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in val_image_paths]
print('First 10 labels indices: ', train_image_labels[:10]) | code |
122255004/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | def thing1():
thing = input('put something: ')
a = 0
for x in list(thing):
if x == 'a':
a += 1
print('total characters:', len(thing), "\nnumber of a's:", a)
thing1() | code
72115124/cell_4 | [
"image_output_2.png",
"image_output_1.png"
] | from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.callbacks import Callback
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
class Callback(tf.keras.callbacks.Callback):
def __init__(self, x_train, y_train, x_val, y_val):
self.x = x_train
self.y = y_train
self.x_val = x_val
self.y_val = y_val
def on_epoch_end(self, epoch, logs={}):
y_pred = self.model.predict(self.x)
roc_train = roc_auc_score(self.y, y_pred)
y_pred_val = self.model.predict(self.x_val)
roc_val = roc_auc_score(self.y_val, y_pred_val)
return
encoder = LabelEncoder()
scaler = MinMaxScaler()
x = pd.read_csv('../input/loan-prediction-based-on-customer-behavior/Training Data.csv')
x = pd.concat([x.loc[x['Risk_Flag'] == 0][:30996], x.loc[x['Risk_Flag'] == 1]])
y = x.pop('Risk_Flag')
str_x = x.select_dtypes(include=[object])
for i in range(0, len(str_x.columns)):
x.pop(str_x.columns[i])
x = scaler.fit_transform(x)
x = pd.DataFrame(x)
str_x = str_x.apply(encoder.fit_transform)
str_x = pd.DataFrame(str_x)
str_x.index = x.index
x = pd.concat([x, str_x], axis=1)
x_train, x_val, y_train, y_val = train_test_split(x, y, train_size=0.8, shuffle=True)
dense_1 = tf.keras.layers.Dense(192, activation='relu', input_dim=12)
dense_2 = tf.keras.layers.Dense(128, activation='relu')
dense_3 = tf.keras.layers.Dense(64, activation='relu')
dense_4 = tf.keras.layers.Dense(32, activation='relu')
output = tf.keras.layers.Dense(1, activation='sigmoid')
model = tf.keras.models.Sequential([dense_1, tf.keras.layers.Dropout(0.4), dense_2, tf.keras.layers.Dropout(0.2), dense_3, dense_4, output])
callback = Callback(x_train=x_train, y_train=y_train, x_val=x_val, y_val=y_val)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=75, batch_size=512, callbacks=[callback])
plt.xlabel('Epochs')
plt.ylabel('Validation')
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['acc', 'validation acc'])
plt.show()
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['loss', 'validation loss'], loc='upper left')
plt.show() | code |
72115124/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import Callback
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
72115124/cell_3 | [
"text_plain_output_1.png"
] | from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.callbacks import Callback
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
class Callback(tf.keras.callbacks.Callback):
def __init__(self, x_train, y_train, x_val, y_val):
self.x = x_train
self.y = y_train
self.x_val = x_val
self.y_val = y_val
def on_epoch_end(self, epoch, logs={}):
y_pred = self.model.predict(self.x)
roc_train = roc_auc_score(self.y, y_pred)
y_pred_val = self.model.predict(self.x_val)
roc_val = roc_auc_score(self.y_val, y_pred_val)
return
encoder = LabelEncoder()
scaler = MinMaxScaler()
x = pd.read_csv('../input/loan-prediction-based-on-customer-behavior/Training Data.csv')
x = pd.concat([x.loc[x['Risk_Flag'] == 0][:30996], x.loc[x['Risk_Flag'] == 1]])
y = x.pop('Risk_Flag')
str_x = x.select_dtypes(include=[object])
for i in range(0, len(str_x.columns)):
x.pop(str_x.columns[i])
x = scaler.fit_transform(x)
x = pd.DataFrame(x)
str_x = str_x.apply(encoder.fit_transform)
str_x = pd.DataFrame(str_x)
str_x.index = x.index
x = pd.concat([x, str_x], axis=1)
x_train, x_val, y_train, y_val = train_test_split(x, y, train_size=0.8, shuffle=True)
dense_1 = tf.keras.layers.Dense(192, activation='relu', input_dim=12)
dense_2 = tf.keras.layers.Dense(128, activation='relu')
dense_3 = tf.keras.layers.Dense(64, activation='relu')
dense_4 = tf.keras.layers.Dense(32, activation='relu')
output = tf.keras.layers.Dense(1, activation='sigmoid')
model = tf.keras.models.Sequential([dense_1, tf.keras.layers.Dropout(0.4), dense_2, tf.keras.layers.Dropout(0.2), dense_3, dense_4, output])
callback = Callback(x_train=x_train, y_train=y_train, x_val=x_val, y_val=y_val)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=75, batch_size=512, callbacks=[callback]) | code |
89132235/cell_4 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
plt.figure(figsize=(10, 10))
for i in range(25):
plt.subplot(5, 5, i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i]) | code |
89132235/cell_6 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from tensorflow.keras import datasets, layers, models
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.summary() | code |
89132235/cell_7 | [
"text_plain_output_1.png"
] | from tensorflow.keras import datasets, layers, models
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.summary()
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
model.summary() | code |
89132235/cell_3 | [
"text_plain_output_1.png"
] | from tensorflow.keras import datasets, layers, models
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data() | code |
129014806/cell_4 | [
"image_output_11.png",
"text_plain_output_5.png",
"application_vnd.jupyter.stderr_output_2.png",
"text_html_output_4.png",
"image_output_14.png",
"application_vnd.jupyter.stderr_output_4.png",
"text_html_output_2.png",
"image_output_13.png",
"text_html_output_5.png",
"image_output_5.png",
"text_plain_output_6.png",
"image_output_7.png",
"text_plain_output_3.png",
"image_output_4.png",
"text_plain_output_7.png",
"image_output_8.png",
"text_html_output_1.png",
"image_output_6.png",
"image_output_12.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_15.png",
"text_html_output_3.png",
"image_output_9.png"
] | from solarcurtailment import curtailment_calculation
file_path = '/kaggle/input/solarunsw/Data'
for i in [1, 11, 14, 4, 5, 9]:
sample_number = i
print('Analyzing sample number {}'.format(i))
data_file = '/data_sample_{}.csv'.format(sample_number)
ghi_file = '/ghi_sample_{}.csv'.format(sample_number)
curtailment_calculation.compute(file_path, data_file, ghi_file) | code |
129014806/cell_2 | [
"text_plain_output_1.png"
] | ! pip install solarcurtailment | code |
129014806/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from solarcurtailment import curtailment_calculation | code |
2041009/cell_4 | [
"text_html_output_1.png"
] | import datetime
import numpy as np
import pandas as pd
def LeaveOneOut(data1, data2, groupcolumns, columnName, useLOO=False, cut=1, addNoise=False):
features = list([])
for a in groupcolumns:
features.append(a)
if columnName is not None:
features.append(columnName)
grpCount = data1.groupby(features)['visitors'].count().reset_index().rename(columns={'visitors': 'Count'})
grpCount = grpCount[grpCount.Count >= cut]
grpMean = data1.groupby(features)['visitors'].mean().reset_index().rename(columns={'visitors': 'Mean'})
grpMedian = data1.groupby(features)['visitors'].median().reset_index().rename(columns={'visitors': 'Median'})
grpMin = data1.groupby(features)['visitors'].min().reset_index().rename(columns={'visitors': 'Min'})
grpMax = data1.groupby(features)['visitors'].max().reset_index().rename(columns={'visitors': 'Max'})
grpStd = data1.groupby(features)['visitors'].std().reset_index().rename(columns={'visitors': 'Std'})
grpOutcomes = grpCount.merge(grpMean, on=features)
grpOutcomes = grpOutcomes.merge(grpMedian, on=features)
grpOutcomes = grpOutcomes.merge(grpMin, on=features)
grpOutcomes = grpOutcomes.merge(grpMax, on=features)
grpOutcomes = grpOutcomes.merge(grpStd, on=features)
x = pd.merge(data2[features], grpOutcomes, suffixes=('x_', ''), how='left', on=features, left_index=True)[['Count', 'Mean', 'Median', 'Max', 'Min', 'Std']]
x['Outcomes'] = data2['visitors'].values
if useLOO:
nonnulls = ~x.Count.isnull()
x.loc[nonnulls, 'Mean'] = x[nonnulls].Mean * x[nonnulls].Count - x[nonnulls].Outcomes
x.loc[nonnulls, 'Median'] = x[nonnulls].Median * x[nonnulls].Count - x[nonnulls].Outcomes
if addNoise is True:
x.loc[nonnulls & (x.Std > 0), 'Mean'] += np.random.normal(0, x[nonnulls & (x.Std > 0)].Std, x[nonnulls & (x.Std > 0)].shape[0])
x.loc[nonnulls & (x.Std > 0), 'Median'] += np.random.normal(0, x[nonnulls & (x.Std > 0)].Std, x[nonnulls & (x.Std > 0)].shape[0])
else:
x.loc[nonnulls, 'Count'] -= 1
x.loc[nonnulls, 'Mean'] /= x[nonnulls].Count
x.loc[nonnulls, 'Median'] /= x[nonnulls].Count
x.Count = np.log1p(x.Count)
x = x.replace(np.inf, np.nan)
x = x.replace(-np.inf, np.nan)
x = x.fillna(x.mean())
return x[['Count', 'Mean', 'Median', 'Max', 'Min', 'Std']]
def MungeTrain():
air_visit_data = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
air_reserve = pd.read_csv('../input/air_reserve.csv', parse_dates=['visit_datetime'])
air_reserve['visit_date'] = air_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv', parse_dates=['visit_datetime'])
hpg_reserve['visit_date'] = hpg_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
store_id_relation = pd.read_csv('../input/store_id_relation.csv')
date_info = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date']).rename(columns={'calendar_date': 'visit_date'})
air_reserve_by_date = air_reserve.groupby(['air_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
hpg_reserve_by_date = hpg_reserve.groupby(['hpg_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
hpg_store_info.drop(['latitude', 'longitude'], inplace=True, axis=1)
train = air_visit_data.merge(air_store_info, on='air_store_id')
train = train.merge(air_reserve_by_date, on=['air_store_id', 'visit_date'], how='left')
train = train.merge(store_id_relation, on='air_store_id', how='left')
train = train.merge(hpg_store_info, on='hpg_store_id', how='left')
train = train.merge(hpg_reserve_by_date, on=['hpg_store_id', 'visit_date'], how='left')
train = train.merge(date_info, on='visit_date', how='left')
train['year'] = train.visit_date.dt.year
train['month'] = train.visit_date.dt.month
train.reserve_visitors_x = train.reserve_visitors_x.fillna(0)
train.reserve_visitors_y = train.reserve_visitors_y.fillna(0)
train.reserve_visitors_x = np.log1p(train.reserve_visitors_x)
train.reserve_visitors_y = np.log1p(train.reserve_visitors_y)
train.visitors = np.log1p(train.visitors)
train.drop(['latitude', 'longitude'], inplace=True, axis=1)
train = train.fillna(-1)
train = train.sort_values(by='visit_date')
return train
def MungeTest(columns):
air_visit_data = pd.read_csv('../input/sample_submission.csv')
air_visit_data['visit_date'] = air_visit_data.id.apply(lambda x: datetime.datetime(year=int(x[-10:-6]), month=int(x[-5:-3]), day=int(x[-2:])))
air_visit_data['air_store_id'] = air_visit_data.id.apply(lambda x: x[:-11])
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
air_reserve = pd.read_csv('../input/air_reserve.csv', parse_dates=['visit_datetime'])
air_reserve['visit_date'] = air_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv', parse_dates=['visit_datetime'])
hpg_reserve['visit_date'] = hpg_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
store_id_relation = pd.read_csv('../input/store_id_relation.csv')
date_info = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date']).rename(columns={'calendar_date': 'visit_date'})
air_reserve_by_date = air_reserve.groupby(['air_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
hpg_reserve_by_date = hpg_reserve.groupby(['hpg_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
hpg_store_info.drop(['latitude', 'longitude'], inplace=True, axis=1)
test = air_visit_data.merge(air_store_info, on='air_store_id')
test = test.merge(air_reserve_by_date, on=['air_store_id', 'visit_date'], how='left')
test = test.merge(store_id_relation, on='air_store_id', how='left')
test = test.merge(hpg_store_info, on='hpg_store_id', how='left')
test = test.merge(hpg_reserve_by_date, on=['hpg_store_id', 'visit_date'], how='left')
test = test.merge(date_info, on='visit_date', how='left')
test['year'] = test.visit_date.dt.year
test['month'] = test.visit_date.dt.month
test.reserve_visitors_x = test.reserve_visitors_x.fillna(0)
test.reserve_visitors_y = test.reserve_visitors_y.fillna(0)
test.reserve_visitors_x = np.log1p(test.reserve_visitors_x)
test.reserve_visitors_y = np.log1p(test.reserve_visitors_y)
test = test.fillna(-1)
test = test.sort_values(by='visit_date')
test.visitors = np.log1p(test.visitors)
return test[list(['id']) + list(columns)]
train = MungeTrain()
test = MungeTest(train.columns)
train.head() | code |
2041009/cell_7 | [
"text_plain_output_1.png"
] | import datetime
import numpy as np
import pandas as pd
def LeaveOneOut(data1, data2, groupcolumns, columnName, useLOO=False, cut=1, addNoise=False):
features = list([])
for a in groupcolumns:
features.append(a)
if columnName is not None:
features.append(columnName)
grpCount = data1.groupby(features)['visitors'].count().reset_index().rename(columns={'visitors': 'Count'})
grpCount = grpCount[grpCount.Count >= cut]
grpMean = data1.groupby(features)['visitors'].mean().reset_index().rename(columns={'visitors': 'Mean'})
grpMedian = data1.groupby(features)['visitors'].median().reset_index().rename(columns={'visitors': 'Median'})
grpMin = data1.groupby(features)['visitors'].min().reset_index().rename(columns={'visitors': 'Min'})
grpMax = data1.groupby(features)['visitors'].max().reset_index().rename(columns={'visitors': 'Max'})
grpStd = data1.groupby(features)['visitors'].std().reset_index().rename(columns={'visitors': 'Std'})
grpOutcomes = grpCount.merge(grpMean, on=features)
grpOutcomes = grpOutcomes.merge(grpMedian, on=features)
grpOutcomes = grpOutcomes.merge(grpMin, on=features)
grpOutcomes = grpOutcomes.merge(grpMax, on=features)
grpOutcomes = grpOutcomes.merge(grpStd, on=features)
x = pd.merge(data2[features], grpOutcomes, suffixes=('x_', ''), how='left', on=features, left_index=True)[['Count', 'Mean', 'Median', 'Max', 'Min', 'Std']]
x['Outcomes'] = data2['visitors'].values
if useLOO:
nonnulls = ~x.Count.isnull()
x.loc[nonnulls, 'Mean'] = x[nonnulls].Mean * x[nonnulls].Count - x[nonnulls].Outcomes
x.loc[nonnulls, 'Median'] = x[nonnulls].Median * x[nonnulls].Count - x[nonnulls].Outcomes
if addNoise is True:
x.loc[nonnulls & (x.Std > 0), 'Mean'] += np.random.normal(0, x[nonnulls & (x.Std > 0)].Std, x[nonnulls & (x.Std > 0)].shape[0])
x.loc[nonnulls & (x.Std > 0), 'Median'] += np.random.normal(0, x[nonnulls & (x.Std > 0)].Std, x[nonnulls & (x.Std > 0)].shape[0])
else:
x.loc[nonnulls, 'Count'] -= 1
x.loc[nonnulls, 'Mean'] /= x[nonnulls].Count
x.loc[nonnulls, 'Median'] /= x[nonnulls].Count
x.Count = np.log1p(x.Count)
x = x.replace(np.inf, np.nan)
x = x.replace(-np.inf, np.nan)
x = x.fillna(x.mean())
return x[['Count', 'Mean', 'Median', 'Max', 'Min', 'Std']]
def MungeTrain():
air_visit_data = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
air_reserve = pd.read_csv('../input/air_reserve.csv', parse_dates=['visit_datetime'])
air_reserve['visit_date'] = air_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv', parse_dates=['visit_datetime'])
hpg_reserve['visit_date'] = hpg_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
store_id_relation = pd.read_csv('../input/store_id_relation.csv')
date_info = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date']).rename(columns={'calendar_date': 'visit_date'})
air_reserve_by_date = air_reserve.groupby(['air_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
hpg_reserve_by_date = hpg_reserve.groupby(['hpg_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
hpg_store_info.drop(['latitude', 'longitude'], inplace=True, axis=1)
train = air_visit_data.merge(air_store_info, on='air_store_id')
train = train.merge(air_reserve_by_date, on=['air_store_id', 'visit_date'], how='left')
train = train.merge(store_id_relation, on='air_store_id', how='left')
train = train.merge(hpg_store_info, on='hpg_store_id', how='left')
train = train.merge(hpg_reserve_by_date, on=['hpg_store_id', 'visit_date'], how='left')
train = train.merge(date_info, on='visit_date', how='left')
train['year'] = train.visit_date.dt.year
train['month'] = train.visit_date.dt.month
train.reserve_visitors_x = train.reserve_visitors_x.fillna(0)
train.reserve_visitors_y = train.reserve_visitors_y.fillna(0)
train.reserve_visitors_x = np.log1p(train.reserve_visitors_x)
train.reserve_visitors_y = np.log1p(train.reserve_visitors_y)
train.visitors = np.log1p(train.visitors)
train.drop(['latitude', 'longitude'], inplace=True, axis=1)
train = train.fillna(-1)
train = train.sort_values(by='visit_date')
return train
def MungeTest(columns):
air_visit_data = pd.read_csv('../input/sample_submission.csv')
air_visit_data['visit_date'] = air_visit_data.id.apply(lambda x: datetime.datetime(year=int(x[-10:-6]), month=int(x[-5:-3]), day=int(x[-2:])))
air_visit_data['air_store_id'] = air_visit_data.id.apply(lambda x: x[:-11])
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
air_reserve = pd.read_csv('../input/air_reserve.csv', parse_dates=['visit_datetime'])
air_reserve['visit_date'] = air_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv', parse_dates=['visit_datetime'])
hpg_reserve['visit_date'] = hpg_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
store_id_relation = pd.read_csv('../input/store_id_relation.csv')
date_info = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date']).rename(columns={'calendar_date': 'visit_date'})
air_reserve_by_date = air_reserve.groupby(['air_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
hpg_reserve_by_date = hpg_reserve.groupby(['hpg_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
hpg_store_info.drop(['latitude', 'longitude'], inplace=True, axis=1)
test = air_visit_data.merge(air_store_info, on='air_store_id')
test = test.merge(air_reserve_by_date, on=['air_store_id', 'visit_date'], how='left')
test = test.merge(store_id_relation, on='air_store_id', how='left')
test = test.merge(hpg_store_info, on='hpg_store_id', how='left')
test = test.merge(hpg_reserve_by_date, on=['hpg_store_id', 'visit_date'], how='left')
test = test.merge(date_info, on='visit_date', how='left')
test['year'] = test.visit_date.dt.year
test['month'] = test.visit_date.dt.month
test.reserve_visitors_x = test.reserve_visitors_x.fillna(0)
test.reserve_visitors_y = test.reserve_visitors_y.fillna(0)
test.reserve_visitors_x = np.log1p(test.reserve_visitors_x)
test.reserve_visitors_y = np.log1p(test.reserve_visitors_y)
test = test.fillna(-1)
test = test.sort_values(by='visit_date')
test.visitors = np.log1p(test.visitors)
return test[list(['id']) + list(columns)]
train = MungeTrain()
test = MungeTest(train.columns)
twoweeks = train.visit_date.max() - pd.Timedelta(days=14)
vistrain = train[train.visit_date < twoweeks].copy()
blindtrain = train[train.visit_date >= twoweeks].copy()
print(vistrain.shape)
print(blindtrain.shape) | code |
2041009/cell_8 | [
"text_plain_output_1.png"
] | import datetime
import numpy as np
import pandas as pd
def LeaveOneOut(data1, data2, groupcolumns, columnName, useLOO=False, cut=1, addNoise=False):
features = list([])
for a in groupcolumns:
features.append(a)
if columnName is not None:
features.append(columnName)
grpCount = data1.groupby(features)['visitors'].count().reset_index().rename(columns={'visitors': 'Count'})
grpCount = grpCount[grpCount.Count >= cut]
grpMean = data1.groupby(features)['visitors'].mean().reset_index().rename(columns={'visitors': 'Mean'})
grpMedian = data1.groupby(features)['visitors'].median().reset_index().rename(columns={'visitors': 'Median'})
grpMin = data1.groupby(features)['visitors'].min().reset_index().rename(columns={'visitors': 'Min'})
grpMax = data1.groupby(features)['visitors'].max().reset_index().rename(columns={'visitors': 'Max'})
grpStd = data1.groupby(features)['visitors'].std().reset_index().rename(columns={'visitors': 'Std'})
grpOutcomes = grpCount.merge(grpMean, on=features)
grpOutcomes = grpOutcomes.merge(grpMedian, on=features)
grpOutcomes = grpOutcomes.merge(grpMin, on=features)
grpOutcomes = grpOutcomes.merge(grpMax, on=features)
grpOutcomes = grpOutcomes.merge(grpStd, on=features)
x = pd.merge(data2[features], grpOutcomes, suffixes=('x_', ''), how='left', on=features, left_index=True)[['Count', 'Mean', 'Median', 'Max', 'Min', 'Std']]
x['Outcomes'] = data2['visitors'].values
if useLOO:
nonnulls = ~x.Count.isnull()
x.loc[nonnulls, 'Mean'] = x[nonnulls].Mean * x[nonnulls].Count - x[nonnulls].Outcomes
x.loc[nonnulls, 'Median'] = x[nonnulls].Median * x[nonnulls].Count - x[nonnulls].Outcomes
if addNoise is True:
x.loc[nonnulls & (x.Std > 0), 'Mean'] += np.random.normal(0, x[nonnulls & (x.Std > 0)].Std, x[nonnulls & (x.Std > 0)].shape[0])
x.loc[nonnulls & (x.Std > 0), 'Median'] += np.random.normal(0, x[nonnulls & (x.Std > 0)].Std, x[nonnulls & (x.Std > 0)].shape[0])
else:
x.loc[nonnulls, 'Count'] -= 1
x.loc[nonnulls, 'Mean'] /= x[nonnulls].Count
x.loc[nonnulls, 'Median'] /= x[nonnulls].Count
x.Count = np.log1p(x.Count)
x = x.replace(np.inf, np.nan)
x = x.replace(-np.inf, np.nan)
x = x.fillna(x.mean())
return x[['Count', 'Mean', 'Median', 'Max', 'Min', 'Std']]
def MungeTrain():
air_visit_data = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
air_reserve = pd.read_csv('../input/air_reserve.csv', parse_dates=['visit_datetime'])
air_reserve['visit_date'] = air_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv', parse_dates=['visit_datetime'])
hpg_reserve['visit_date'] = hpg_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
store_id_relation = pd.read_csv('../input/store_id_relation.csv')
date_info = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date']).rename(columns={'calendar_date': 'visit_date'})
air_reserve_by_date = air_reserve.groupby(['air_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
hpg_reserve_by_date = hpg_reserve.groupby(['hpg_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
hpg_store_info.drop(['latitude', 'longitude'], inplace=True, axis=1)
train = air_visit_data.merge(air_store_info, on='air_store_id')
train = train.merge(air_reserve_by_date, on=['air_store_id', 'visit_date'], how='left')
train = train.merge(store_id_relation, on='air_store_id', how='left')
train = train.merge(hpg_store_info, on='hpg_store_id', how='left')
train = train.merge(hpg_reserve_by_date, on=['hpg_store_id', 'visit_date'], how='left')
train = train.merge(date_info, on='visit_date', how='left')
train['year'] = train.visit_date.dt.year
train['month'] = train.visit_date.dt.month
train.reserve_visitors_x = train.reserve_visitors_x.fillna(0)
train.reserve_visitors_y = train.reserve_visitors_y.fillna(0)
train.reserve_visitors_x = np.log1p(train.reserve_visitors_x)
train.reserve_visitors_y = np.log1p(train.reserve_visitors_y)
train.visitors = np.log1p(train.visitors)
train.drop(['latitude', 'longitude'], inplace=True, axis=1)
train = train.fillna(-1)
train = train.sort_values(by='visit_date')
return train
def MungeTest(columns):
air_visit_data = pd.read_csv('../input/sample_submission.csv')
air_visit_data['visit_date'] = air_visit_data.id.apply(lambda x: datetime.datetime(year=int(x[-10:-6]), month=int(x[-5:-3]), day=int(x[-2:])))
air_visit_data['air_store_id'] = air_visit_data.id.apply(lambda x: x[:-11])
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
air_reserve = pd.read_csv('../input/air_reserve.csv', parse_dates=['visit_datetime'])
air_reserve['visit_date'] = air_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv', parse_dates=['visit_datetime'])
hpg_reserve['visit_date'] = hpg_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
store_id_relation = pd.read_csv('../input/store_id_relation.csv')
date_info = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date']).rename(columns={'calendar_date': 'visit_date'})
air_reserve_by_date = air_reserve.groupby(['air_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
hpg_reserve_by_date = hpg_reserve.groupby(['hpg_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
hpg_store_info.drop(['latitude', 'longitude'], inplace=True, axis=1)
test = air_visit_data.merge(air_store_info, on='air_store_id')
test = test.merge(air_reserve_by_date, on=['air_store_id', 'visit_date'], how='left')
test = test.merge(store_id_relation, on='air_store_id', how='left')
test = test.merge(hpg_store_info, on='hpg_store_id', how='left')
test = test.merge(hpg_reserve_by_date, on=['hpg_store_id', 'visit_date'], how='left')
test = test.merge(date_info, on='visit_date', how='left')
test['year'] = test.visit_date.dt.year
test['month'] = test.visit_date.dt.month
test.reserve_visitors_x = test.reserve_visitors_x.fillna(0)
test.reserve_visitors_y = test.reserve_visitors_y.fillna(0)
test.reserve_visitors_x = np.log1p(test.reserve_visitors_x)
test.reserve_visitors_y = np.log1p(test.reserve_visitors_y)
test = test.fillna(-1)
test = test.sort_values(by='visit_date')
test.visitors = np.log1p(test.visitors)
return test[list(['id']) + list(columns)]
train = MungeTrain()
test = MungeTest(train.columns)
twoweeks = train.visit_date.max() - pd.Timedelta(days=14)
vistrain = train[train.visit_date < twoweeks].copy()
blindtrain = train[train.visit_date >= twoweeks].copy()
features = ['day_of_week', 'holiday_flg', 'year']
for c in features:
print(c)
test[c + '_Count_Store'] = np.nan
test[c + '_Mean_Store'] = np.nan
test[c + '_Median_Store'] = np.nan
test[c + '_Max_Store'] = np.nan
test[c + '_Min_Store'] = np.nan
test[c + '_Std_Store'] = np.nan
vistrain[c + '_Count_Store'] = np.nan
vistrain[c + '_Mean_Store'] = np.nan
vistrain[c + '_Median_Store'] = np.nan
vistrain[c + '_Max_Store'] = np.nan
vistrain[c + '_Min_Store'] = np.nan
vistrain[c + '_Std_Store'] = np.nan
blindtrain[c + '_Count_Store'] = np.nan
blindtrain[c + '_Mean_Store'] = np.nan
blindtrain[c + '_Median_Store'] = np.nan
blindtrain[c + '_Max_Store'] = np.nan
blindtrain[c + '_Min_Store'] = np.nan
blindtrain[c + '_Std_Store'] = np.nan
test[[c + '_Count_Store', c + '_Mean_Store', c + '_Median_Store', c + '_Max_Store', c + '_Min_Store', c + '_Std_Store']] = LeaveOneOut(vistrain, test, list(['air_store_id']), c, useLOO=True, cut=0).values
blindtrain[[c + '_Count_Store', c + '_Mean_Store', c + '_Median_Store', c + '_Max_Store', c + '_Min_Store', c + '_Std_Store']] = LeaveOneOut(vistrain, blindtrain, list(['air_store_id']), c, useLOO=True, cut=0).values
vistrain[[c + '_Count_Store', c + '_Mean_Store', c + '_Median_Store', c + '_Max_Store', c + '_Min_Store', c + '_Std_Store']] = LeaveOneOut(vistrain, vistrain, list(['air_store_id']), c, useLOO=True, cut=1, addNoise=False).values
features = ['air_store_id', 'air_genre_name', 'air_area_name', 'hpg_store_id', 'hpg_genre_name', 'hpg_area_name', 'day_of_week', 'holiday_flg', 'year', 'month']
for c in features:
print(c)
test[c + '_Count'] = np.nan
test[c + '_Mean'] = np.nan
test[c + '_Median'] = np.nan
test[c + '_Max'] = np.nan
test[c + '_Min'] = np.nan
test[c + '_Std'] = np.nan
vistrain[c + '_Count'] = np.nan
vistrain[c + '_Mean'] = np.nan
vistrain[c + '_Median'] = np.nan
vistrain[c + '_Max'] = np.nan
vistrain[c + '_Min'] = np.nan
vistrain[c + '_Std'] = np.nan
blindtrain[c + '_Count'] = np.nan
blindtrain[c + '_Mean'] = np.nan
blindtrain[c + '_Median'] = np.nan
blindtrain[c + '_Max'] = np.nan
blindtrain[c + '_Min'] = np.nan
blindtrain[c + '_Std'] = np.nan
test[[c + '_Count', c + '_Mean', c + '_Median', c + '_Max', c + '_Min', c + '_Std']] = LeaveOneOut(vistrain.copy(), test.copy(), list([]), c, useLOO=False, cut=0, addNoise=False).values
blindtrain[[c + '_Count', c + '_Mean', c + '_Median', c + '_Max', c + '_Min', c + '_Std']] = LeaveOneOut(vistrain.copy(), blindtrain.copy(), list([]), c, useLOO=False, cut=0, addNoise=False).values
vistrain[[c + '_Count', c + '_Mean', c + '_Median', c + '_Max', c + '_Min', c + '_Std']] = LeaveOneOut(vistrain.copy(), vistrain.copy(), list([]), c, useLOO=True, cut=1, addNoise=False).values
test.drop(c, inplace=True, axis=1)
blindtrain.drop(c, inplace=True, axis=1)
vistrain.drop(c, inplace=True, axis=1)
test = test.fillna(-1)
blindtrain = blindtrain.fillna(-1)
vistrain = vistrain.fillna(-1) | code |
2041009/cell_5 | [
"text_plain_output_1.png"
] | import datetime
import numpy as np
import pandas as pd
def LeaveOneOut(data1, data2, groupcolumns, columnName, useLOO=False, cut=1, addNoise=False):
features = list([])
for a in groupcolumns:
features.append(a)
if columnName is not None:
features.append(columnName)
grpCount = data1.groupby(features)['visitors'].count().reset_index().rename(columns={'visitors': 'Count'})
grpCount = grpCount[grpCount.Count >= cut]
grpMean = data1.groupby(features)['visitors'].mean().reset_index().rename(columns={'visitors': 'Mean'})
grpMedian = data1.groupby(features)['visitors'].median().reset_index().rename(columns={'visitors': 'Median'})
grpMin = data1.groupby(features)['visitors'].min().reset_index().rename(columns={'visitors': 'Min'})
grpMax = data1.groupby(features)['visitors'].max().reset_index().rename(columns={'visitors': 'Max'})
grpStd = data1.groupby(features)['visitors'].std().reset_index().rename(columns={'visitors': 'Std'})
grpOutcomes = grpCount.merge(grpMean, on=features)
grpOutcomes = grpOutcomes.merge(grpMedian, on=features)
grpOutcomes = grpOutcomes.merge(grpMin, on=features)
grpOutcomes = grpOutcomes.merge(grpMax, on=features)
grpOutcomes = grpOutcomes.merge(grpStd, on=features)
x = pd.merge(data2[features], grpOutcomes, suffixes=('x_', ''), how='left', on=features, left_index=True)[['Count', 'Mean', 'Median', 'Max', 'Min', 'Std']]
x['Outcomes'] = data2['visitors'].values
if useLOO:
nonnulls = ~x.Count.isnull()
x.loc[nonnulls, 'Mean'] = x[nonnulls].Mean * x[nonnulls].Count - x[nonnulls].Outcomes
x.loc[nonnulls, 'Median'] = x[nonnulls].Median * x[nonnulls].Count - x[nonnulls].Outcomes
if addNoise is True:
x.loc[nonnulls & (x.Std > 0), 'Mean'] += np.random.normal(0, x[nonnulls & (x.Std > 0)].Std, x[nonnulls & (x.Std > 0)].shape[0])
x.loc[nonnulls & (x.Std > 0), 'Median'] += np.random.normal(0, x[nonnulls & (x.Std > 0)].Std, x[nonnulls & (x.Std > 0)].shape[0])
else:
x.loc[nonnulls, 'Count'] -= 1
x.loc[nonnulls, 'Mean'] /= x[nonnulls].Count
x.loc[nonnulls, 'Median'] /= x[nonnulls].Count
x.Count = np.log1p(x.Count)
x = x.replace(np.inf, np.nan)
x = x.replace(-np.inf, np.nan)
x = x.fillna(x.mean())
return x[['Count', 'Mean', 'Median', 'Max', 'Min', 'Std']]
def MungeTrain():
air_visit_data = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
air_reserve = pd.read_csv('../input/air_reserve.csv', parse_dates=['visit_datetime'])
air_reserve['visit_date'] = air_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv', parse_dates=['visit_datetime'])
hpg_reserve['visit_date'] = hpg_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
store_id_relation = pd.read_csv('../input/store_id_relation.csv')
date_info = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date']).rename(columns={'calendar_date': 'visit_date'})
air_reserve_by_date = air_reserve.groupby(['air_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
hpg_reserve_by_date = hpg_reserve.groupby(['hpg_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
hpg_store_info.drop(['latitude', 'longitude'], inplace=True, axis=1)
train = air_visit_data.merge(air_store_info, on='air_store_id')
train = train.merge(air_reserve_by_date, on=['air_store_id', 'visit_date'], how='left')
train = train.merge(store_id_relation, on='air_store_id', how='left')
train = train.merge(hpg_store_info, on='hpg_store_id', how='left')
train = train.merge(hpg_reserve_by_date, on=['hpg_store_id', 'visit_date'], how='left')
train = train.merge(date_info, on='visit_date', how='left')
train['year'] = train.visit_date.dt.year
train['month'] = train.visit_date.dt.month
train.reserve_visitors_x = train.reserve_visitors_x.fillna(0)
train.reserve_visitors_y = train.reserve_visitors_y.fillna(0)
train.reserve_visitors_x = np.log1p(train.reserve_visitors_x)
train.reserve_visitors_y = np.log1p(train.reserve_visitors_y)
train.visitors = np.log1p(train.visitors)
train.drop(['latitude', 'longitude'], inplace=True, axis=1)
train = train.fillna(-1)
train = train.sort_values(by='visit_date')
return train
def MungeTest(columns):
air_visit_data = pd.read_csv('../input/sample_submission.csv')
air_visit_data['visit_date'] = air_visit_data.id.apply(lambda x: datetime.datetime(year=int(x[-10:-6]), month=int(x[-5:-3]), day=int(x[-2:])))
air_visit_data['air_store_id'] = air_visit_data.id.apply(lambda x: x[:-11])
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
air_reserve = pd.read_csv('../input/air_reserve.csv', parse_dates=['visit_datetime'])
air_reserve['visit_date'] = air_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv', parse_dates=['visit_datetime'])
hpg_reserve['visit_date'] = hpg_reserve.visit_datetime.apply(lambda df: datetime.datetime(year=df.year, month=df.month, day=df.day))
store_id_relation = pd.read_csv('../input/store_id_relation.csv')
date_info = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date']).rename(columns={'calendar_date': 'visit_date'})
air_reserve_by_date = air_reserve.groupby(['air_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
hpg_reserve_by_date = hpg_reserve.groupby(['hpg_store_id', 'visit_date']).reserve_visitors.sum().reset_index(drop=False)
hpg_store_info.drop(['latitude', 'longitude'], inplace=True, axis=1)
test = air_visit_data.merge(air_store_info, on='air_store_id')
test = test.merge(air_reserve_by_date, on=['air_store_id', 'visit_date'], how='left')
test = test.merge(store_id_relation, on='air_store_id', how='left')
test = test.merge(hpg_store_info, on='hpg_store_id', how='left')
test = test.merge(hpg_reserve_by_date, on=['hpg_store_id', 'visit_date'], how='left')
test = test.merge(date_info, on='visit_date', how='left')
test['year'] = test.visit_date.dt.year
test['month'] = test.visit_date.dt.month
test.reserve_visitors_x = test.reserve_visitors_x.fillna(0)
test.reserve_visitors_y = test.reserve_visitors_y.fillna(0)
test.reserve_visitors_x = np.log1p(test.reserve_visitors_x)
test.reserve_visitors_y = np.log1p(test.reserve_visitors_y)
test = test.fillna(-1)
test = test.sort_values(by='visit_date')
test.visitors = np.log1p(test.visitors)
return test[list(['id']) + list(columns)]
train = MungeTrain()
test = MungeTest(train.columns)
test.head() | code |
32065505/cell_18 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from gensim.models import KeyedVectors
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
import nltk
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
FILE_PATH = '../input/fasttext-wikinews/wiki-news-300d-1M.vec'
keyed_vec = KeyedVectors.load_word2vec_format(FILE_PATH)
word_vec = keyed_vec.get_vector('foobar')
keras_embedding = keyed_vec.get_keras_embedding()
keras_embedding.get_config()
def mean_fasttext(arr, embedding_dim=300):
"""
Create the average of the fasttext embeddings from each word in a document.
Very slow function, needs to be optimized for larger datasets
"""
mean_vectors = []
for document in arr:
tokens = nltk.tokenize.word_tokenize(document)
vectors = [keyed_vec.get_vector(token) for token in tokens if token in keyed_vec.vocab]
if vectors:
mean_vec = np.vstack(vectors).mean(axis=0)
mean_vectors.append(mean_vec)
else:
mean_vectors.append(np.zeros(embedding_dim))
embedding = np.vstack(mean_vectors)
return embedding
data_sample = pd.read_csv('../input/quora-insincere-questions-classification/train.csv', nrows=6000)
train_sample = data_sample[:5000]
test_sample = data_sample[5000:]
X_train = mean_fasttext(train_sample['question_text'].values)
X_test = mean_fasttext(test_sample['question_text'].values)
y_train = train_sample['target'].values
y_test = test_sample['target'].values
model = LogisticRegression(solver='lbfgs')
model.fit(X_train, y_train)
print('Train Score:', f1_score(y_train, model.predict(X_train)))
print('Test Score:', f1_score(y_test, model.predict(X_test))) | code |
32065505/cell_8 | [
"text_plain_output_1.png"
] | from gensim.models import KeyedVectors
FILE_PATH = '../input/fasttext-wikinews/wiki-news-300d-1M.vec'
keyed_vec = KeyedVectors.load_word2vec_format(FILE_PATH)
for word in ['hello', '!', '2', 'Turing', 'foobarz', 'hi!']:
print(word, 'is in the vocabulary:', word in keyed_vec.vocab) | code |
32065505/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_sample = pd.read_csv('../input/quora-insincere-questions-classification/train.csv', nrows=6000)
train_sample = data_sample[:5000]
test_sample = data_sample[5000:]
train_sample.head() | code |
32065505/cell_3 | [
"text_html_output_1.png"
] | import os
print(os.listdir('../input')) | code |
32065505/cell_17 | [
"text_plain_output_1.png"
] | from gensim.models import KeyedVectors
import nltk
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
FILE_PATH = '../input/fasttext-wikinews/wiki-news-300d-1M.vec'
keyed_vec = KeyedVectors.load_word2vec_format(FILE_PATH)
word_vec = keyed_vec.get_vector('foobar')
keras_embedding = keyed_vec.get_keras_embedding()
keras_embedding.get_config()
def mean_fasttext(arr, embedding_dim=300):
"""
Create the average of the fasttext embeddings from each word in a document.
Very slow function, needs to be optimized for larger datasets
"""
mean_vectors = []
for document in arr:
tokens = nltk.tokenize.word_tokenize(document)
vectors = [keyed_vec.get_vector(token) for token in tokens if token in keyed_vec.vocab]
if vectors:
mean_vec = np.vstack(vectors).mean(axis=0)
mean_vectors.append(mean_vec)
else:
mean_vectors.append(np.zeros(embedding_dim))
embedding = np.vstack(mean_vectors)
return embedding
data_sample = pd.read_csv('../input/quora-insincere-questions-classification/train.csv', nrows=6000)
train_sample = data_sample[:5000]
test_sample = data_sample[5000:]
X_train = mean_fasttext(train_sample['question_text'].values)
X_test = mean_fasttext(test_sample['question_text'].values)
y_train = train_sample['target'].values
y_test = test_sample['target'].values
print(X_train.shape)
print(y_train.shape) | code |
32065505/cell_10 | [
"text_plain_output_1.png"
] | from gensim.models import KeyedVectors
FILE_PATH = '../input/fasttext-wikinews/wiki-news-300d-1M.vec'
keyed_vec = KeyedVectors.load_word2vec_format(FILE_PATH)
word_vec = keyed_vec.get_vector('foobar')
print(word_vec.shape)
print(word_vec[:25]) | code |
32065505/cell_12 | [
"text_plain_output_1.png"
] | from gensim.models import KeyedVectors
FILE_PATH = '../input/fasttext-wikinews/wiki-news-300d-1M.vec'
keyed_vec = KeyedVectors.load_word2vec_format(FILE_PATH)
word_vec = keyed_vec.get_vector('foobar')
keras_embedding = keyed_vec.get_keras_embedding()
keras_embedding.get_config() | code |
32065505/cell_5 | [
"text_plain_output_1.png"
] | FILE_PATH = '../input/fasttext-wikinews/wiki-news-300d-1M.vec'
with open(FILE_PATH) as f:
for _ in range(5):
print(f.readline()[:80]) | code |
72089413/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import json_lines
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
if i < 10000:
data0 += [item]
print(len(data0[0])) | code |
72089413/cell_34 | [
"text_plain_output_1.png"
] | from pandas.io.json import json_normalize
from tqdm import tqdm
from tqdm.notebook import tqdm
import json_lines
import pandas as pd
import random
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
if i < 10000:
data0 += [item]
users0 = json_normalize(data0[0][0])
users0
for i, item in tqdm(enumerate(data0[0])):
if 0 < i and i < 10000:
usersi = json_normalize(item)
users0 = pd.concat([users0, usersi])
N = list(range(10000))
data1 = users0.copy()
data1['index0'] = N
data1 = data1.set_index('index0', drop=True)
data1
data2 = data1.drop(['channelId', 'videoId', 'videoPublished'], axis=1)
data2
data2 = data2.astype(float)
target = ['subscriberCount']
dataY = data2[target[0]]
dataX = data2.drop(target, axis=1)
n = len(dataX)
random.seed(2021)
random.shuffle(N)
trainX = dataX.loc[N[0:n // 4 * 3]]
trainY = dataY.loc[N[0:n // 4 * 3]]
testX = dataX.loc[N[n // 4 * 3:]]
testY = dataY.loc[N[n // 4 * 3:]]
y = trainY
print(y.shape)
print(type(y)) | code |
72089413/cell_23 | [
"text_plain_output_1.png"
] | from pandas.io.json import json_normalize
from tqdm.notebook import tqdm
import json_lines
import pandas as pd
import random
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
if i < 10000:
data0 += [item]
users0 = json_normalize(data0[0][0])
users0
for i, item in tqdm(enumerate(data0[0])):
if 0 < i and i < 10000:
usersi = json_normalize(item)
users0 = pd.concat([users0, usersi])
N = list(range(10000))
data1 = users0.copy()
data1['index0'] = N
data1 = data1.set_index('index0', drop=True)
data1
data2 = data1.drop(['channelId', 'videoId', 'videoPublished'], axis=1)
data2
data2 = data2.astype(float)
target = ['subscriberCount']
dataY = data2[target[0]]
dataX = data2.drop(target, axis=1)
n = len(dataX)
print(n)
random.seed(2021)
random.shuffle(N) | code |