path (string, 13–17 chars) | screenshot_names (sequence, 1–873 items) | code (string, 0–40.4k chars) | cell_type (string, 1 class) |
---|---|---|---|
49118983/cell_44 | [
"text_plain_output_1.png"
] | import tensorflow as tf
import tensorflow.keras.layers as L
import tensorflow.keras.models as M
FE = ['content_emb', 'user_emb', 'duration', 'prior_answer']
TARGET = 'answered_correctly'
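# answered_correctly == -1 marks lecture events in the Riiid data, so those rows are excluded below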
x = tr_preprocessed.loc[tr_preprocessed.answered_correctly != -1, FE].values
y = tr_preprocessed.loc[tr_preprocessed.answered_correctly != -1, TARGET].values
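# Locate the TPU attached to this session, connect to it, and build a distribution strategy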
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)
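# Build and compile the model inside the strategy scope so its variables are placed on the TPU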
with tpu_strategy.scope():
    def make_ann(n_in):
        inp = L.Input(shape=(n_in,), name='inp')
        d1 = L.Dense(100, activation='relu', name='d1')(inp)
        d2 = L.Dense(100, activation='relu', name='d2')(d1)
        preds = L.Dense(1, activation='sigmoid', name='preds')(d2)
        model = M.Model(inp, preds, name='ANN')
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        return model
    net = make_ann(x.shape[1])
net.fit(x, y, validation_split=0.2, batch_size=30000, epochs=1) | code |
49118983/cell_40 | [
"text_plain_output_1.png"
] | import tensorflow as tf
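# Detect the TPU attached to this Kaggle session, connect to it, and initialise the TPU system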
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu) | code |
49118983/cell_29 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import gc
import tensorflow as tf
import tensorflow.keras.models as M
import tensorflow.keras.layers as L
import riiideducation
INPUT_DIR = '/kaggle/input/riiid-test-answer-prediction/'
TRAIN_FILE = os.path.join(INPUT_DIR, 'train.csv')
TEST_FILE = os.path.join(INPUT_DIR, 'test.csv')
QUES_FILE = os.path.join(INPUT_DIR, 'questions.csv')
LEC_FILE = os.path.join(INPUT_DIR, 'lectures.csv')
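# Read only the needed columns, with compact dtypes, to keep memory usage low on the large train file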
tr = pd.read_csv(TRAIN_FILE, usecols=[1, 2, 3, 4, 7, 8, 9], dtype={'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'})
def ds_to_pickle(ds, ds_file, pkl_file):
    # Round-trip the frame through a pickle on disk and free the original object
    ds.to_pickle(pkl_file)
    del ds
    return pd.read_pickle(pkl_file)
tr = ds_to_pickle(tr, TRAIN_FILE, 'tr.pkl')
total_num_users = tr.user_id.unique().size
unique_user_ids = list(tr.user_id.unique())
total_num_ques = tr.loc[tr.content_type_id == 0].content_id.unique().size
unique_ques = list(tr.loc[tr.content_type_id == 0].content_id.unique())
num_ques_per_user = tr.loc[tr.content_type_id == 0, 'user_id'].value_counts().rename_axis('user_id').reset_index(name='num_ques_answered')
num_ques_answered = num_ques_per_user.sort_values('num_ques_answered')['num_ques_answered'].to_frame(name='num_ques_answered')
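# (assumed from the variable name; the defining cell was not captured) keep interactions of users who answered more than 100 questions
tr_user_ques_gt_100 = tr[tr.user_id.isin(num_ques_per_user.loc[num_ques_per_user.num_ques_answered > 100, 'user_id'])]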
new_num_rows = len(tr_user_ques_gt_100.index)
old_num_rows = len(tr.index)
tr_user_ques_gt_100.to_pickle('tr_user_ans_gt_100_ques.pkl')
tr_user_ques_gt_100.info() | code |
49118983/cell_26 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import gc
import tensorflow as tf
import tensorflow.keras.models as M
import tensorflow.keras.layers as L
import riiideducation
INPUT_DIR = '/kaggle/input/riiid-test-answer-prediction/'
TRAIN_FILE = os.path.join(INPUT_DIR, 'train.csv')
TEST_FILE = os.path.join(INPUT_DIR, 'test.csv')
QUES_FILE = os.path.join(INPUT_DIR, 'questions.csv')
LEC_FILE = os.path.join(INPUT_DIR, 'lectures.csv')
tr = pd.read_csv(TRAIN_FILE, usecols=[1, 2, 3, 4, 7, 8, 9], dtype={'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'})
def ds_to_pickle(ds, ds_file, pkl_file):
    ds.to_pickle(pkl_file)
    del ds
    return pd.read_pickle(pkl_file)
tr = ds_to_pickle(tr, TRAIN_FILE, 'tr.pkl')
total_num_users = tr.user_id.unique().size
unique_user_ids = list(tr.user_id.unique())
total_num_ques = tr.loc[tr.content_type_id == 0].content_id.unique().size
unique_ques = list(tr.loc[tr.content_type_id == 0].content_id.unique())
num_ques_per_user = tr.loc[tr.content_type_id == 0, 'user_id'].value_counts().rename_axis('user_id').reset_index(name='num_ques_answered')
num_ques_answered = num_ques_per_user.sort_values('num_ques_answered')['num_ques_answered'].to_frame(name='num_ques_answered')
new_num_rows = len(tr_user_ques_gt_100.index)
old_num_rows = len(tr.index)
print('Old rows:', old_num_rows, '\nNew rows:', new_num_rows, '\nReduced to:', new_num_rows * 100 / old_num_rows, '% of original dataset size')
print("That's a 70% reduction, YAY!") | code |
49118983/cell_11 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import gc
import tensorflow as tf
import tensorflow.keras.models as M
import tensorflow.keras.layers as L
import riiideducation
INPUT_DIR = '/kaggle/input/riiid-test-answer-prediction/'
TRAIN_FILE = os.path.join(INPUT_DIR, 'train.csv')
TEST_FILE = os.path.join(INPUT_DIR, 'test.csv')
QUES_FILE = os.path.join(INPUT_DIR, 'questions.csv')
LEC_FILE = os.path.join(INPUT_DIR, 'lectures.csv')
tr = pd.read_csv(TRAIN_FILE, usecols=[1, 2, 3, 4, 7, 8, 9], dtype={'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'})
def ds_to_pickle(ds, ds_file, pkl_file):
    ds.to_pickle(pkl_file)
    del ds
    return pd.read_pickle(pkl_file)
tr = ds_to_pickle(tr, TRAIN_FILE, 'tr.pkl') | code |
49118983/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import gc
import tensorflow as tf
import tensorflow.keras.models as M
import tensorflow.keras.layers as L
import riiideducation
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
49118983/cell_28 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import gc
import tensorflow as tf
import tensorflow.keras.models as M
import tensorflow.keras.layers as L
import riiideducation
INPUT_DIR = '/kaggle/input/riiid-test-answer-prediction/'
TRAIN_FILE = os.path.join(INPUT_DIR, 'train.csv')
TEST_FILE = os.path.join(INPUT_DIR, 'test.csv')
QUES_FILE = os.path.join(INPUT_DIR, 'questions.csv')
LEC_FILE = os.path.join(INPUT_DIR, 'lectures.csv')
tr = pd.read_csv(TRAIN_FILE, usecols=[1, 2, 3, 4, 7, 8, 9], dtype={'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'})
def ds_to_pickle(ds, ds_file, pkl_file):
    ds.to_pickle(pkl_file)
    del ds
    return pd.read_pickle(pkl_file)
tr = ds_to_pickle(tr, TRAIN_FILE, 'tr.pkl')
total_num_users = tr.user_id.unique().size
unique_user_ids = list(tr.user_id.unique())
total_num_ques = tr.loc[tr.content_type_id == 0].content_id.unique().size
unique_ques = list(tr.loc[tr.content_type_id == 0].content_id.unique())
num_ques_per_user = tr.loc[tr.content_type_id == 0, 'user_id'].value_counts().rename_axis('user_id').reset_index(name='num_ques_answered')
num_ques_answered = num_ques_per_user.sort_values('num_ques_answered')['num_ques_answered'].to_frame(name='num_ques_answered')
new_num_rows = len(tr_user_ques_gt_100.index)
old_num_rows = len(tr.index)
tr.info() | code |
49118983/cell_8 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import gc
import tensorflow as tf
import tensorflow.keras.models as M
import tensorflow.keras.layers as L
import riiideducation
INPUT_DIR = '/kaggle/input/riiid-test-answer-prediction/'
TRAIN_FILE = os.path.join(INPUT_DIR, 'train.csv')
TEST_FILE = os.path.join(INPUT_DIR, 'test.csv')
QUES_FILE = os.path.join(INPUT_DIR, 'questions.csv')
LEC_FILE = os.path.join(INPUT_DIR, 'lectures.csv')
tr = pd.read_csv(TRAIN_FILE, usecols=[1, 2, 3, 4, 7, 8, 9], dtype={'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'})
tr.head() | code |
49118983/cell_3 | [
"text_plain_output_1.png"
] | import gc
gc.collect() | code |
49118983/cell_12 | [
"text_html_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import gc
import tensorflow as tf
import tensorflow.keras.models as M
import tensorflow.keras.layers as L
import riiideducation
INPUT_DIR = '/kaggle/input/riiid-test-answer-prediction/'
TRAIN_FILE = os.path.join(INPUT_DIR, 'train.csv')
TEST_FILE = os.path.join(INPUT_DIR, 'test.csv')
QUES_FILE = os.path.join(INPUT_DIR, 'questions.csv')
LEC_FILE = os.path.join(INPUT_DIR, 'lectures.csv')
tr = pd.read_csv(TRAIN_FILE, usecols=[1, 2, 3, 4, 7, 8, 9], dtype={'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'})
def ds_to_pickle(ds, ds_file, pkl_file):
    ds.to_pickle(pkl_file)
    del ds
    return pd.read_pickle(pkl_file)
tr = ds_to_pickle(tr, TRAIN_FILE, 'tr.pkl')
tr.info() | code |
49118983/cell_36 | [
"text_plain_output_1.png"
] | tr_preprocessed = preprocess(tr) | code |
50243208/cell_13 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv', parse_dates=['date'])
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
data_test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
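# Drop extreme prices and daily counts, plus returns (negative item_cnt_day)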
sales_train = sales_train[sales_train.item_price < 40000]
sales_train = sales_train[sales_train.item_cnt_day < 200]
sales_train = sales_train[sales_train.item_cnt_day > -1]
columns = ['date', 'date_block_num', 'shop_id', 'item_id', 'item_price', 'item_cnt_day']
sales_train.drop_duplicates(columns, keep='first', inplace=True)
data = sales_train[['item_cnt_day', 'item_price']]
x = data.iloc[:, :-1].values
y = data.iloc[:, 1].values
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=1 / 3, random_state=123, shuffle=True)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
plt.scatter(X_train, y_train, color='red')
plt.plot(X_train, model.predict(X_train), color='blue')
plt.xlabel('item_cnt_day')
plt.ylabel('item_price')
plt.show() | code |
50243208/cell_9 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv', parse_dates=['date'])
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
data_test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
sales_train = sales_train[sales_train.item_price < 40000]
sales_train = sales_train[sales_train.item_cnt_day < 200]
sales_train = sales_train[sales_train.item_cnt_day > -1]
columns = ['date', 'date_block_num', 'shop_id', 'item_id', 'item_price', 'item_cnt_day']
sales_train.drop_duplicates(columns, keep='first', inplace=True)
plt.figure(figsize=(10, 10))
plt.scatter(sales_train.item_cnt_day, sales_train.item_price)
plt.show() | code |
50243208/cell_11 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv', parse_dates=['date'])
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
data_test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
sales_train = sales_train[sales_train.item_price < 40000]
sales_train = sales_train[sales_train.item_cnt_day < 200]
sales_train = sales_train[sales_train.item_cnt_day > -1]
columns = ['date', 'date_block_num', 'shop_id', 'item_id', 'item_price', 'item_cnt_day']
sales_train.drop_duplicates(columns, keep='first', inplace=True)
data = sales_train[['item_cnt_day', 'item_price']]
data.info()
data.head() | code |
50243208/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
50243208/cell_15 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv', parse_dates=['date'])
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
data_test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
sales_train = sales_train[sales_train.item_price < 40000]
sales_train = sales_train[sales_train.item_cnt_day < 200]
sales_train = sales_train[sales_train.item_cnt_day > -1]
columns = ['date', 'date_block_num', 'shop_id', 'item_id', 'item_price', 'item_cnt_day']
sales_train.drop_duplicates(columns, keep='first', inplace=True)
data = sales_train[['item_cnt_day', 'item_price']]
x = data.iloc[:, :-1].values
y = data.iloc[:, 1].values
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=1 / 3, random_state=123, shuffle=True)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
r2_score(y_test, y_pred) | code |
50243208/cell_3 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv', parse_dates=['date'])
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
data_test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
sales_train.info()
sales_train.head() | code |
50243208/cell_5 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv', parse_dates=['date'])
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
data_test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
plt.figure(figsize=(10, 10))
plt.scatter(sales_train.item_cnt_day, sales_train.item_price)
plt.show() | code |
72092168/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
p1 = '../input/30dml-30-d-ml-xgb/submission.csv'
p2 = '../input/30dml-catboost/submission.csv'
p3 = '../input/30dml-catboost-xgb-folds/submission.csv'
p4 = '../input/30dml-lightgbm/submission_lgb_5.csv'
all_s = []
for p in [p1, p2, p3, p4]:
    all_s.append(pd.read_csv(p))
weights = [0.03, 0.2, 0.03, 0.74]
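# Blend the four submissions as a weighted average of their target columns (the weights sum to 1)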
sub = pd.concat([w * x.target for x, w in zip(all_s, weights)], axis=1).sum(axis=1)
sub.name = 'target'
sub = pd.concat([all_s[0]['id'], sub], axis=1)
sub.to_csv('submission_ens.csv', index=False)
sub.head() | code |
90150886/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
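# Strip the dollar sign from price and convert the column to float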
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum()
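# Impute missing scores: mean for camera/audio/battery, median for selfie/display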
data['camera'].fillna(data['camera'].mean(), inplace=True)
data['selfie'].fillna(data['selfie'].median(), inplace=True)
data['audio'].fillna(data['audio'].mean(), inplace=True)
data['battery'].fillna(data['battery'].mean(), inplace=True)
data['display'].fillna(data['display'].median(), inplace=True)
data.isnull().sum()
data['model'].duplicated().sum() | code |
90150886/cell_25 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum()
data['camera'].fillna(data['camera'].mean(), inplace=True)
data['selfie'].fillna(data['selfie'].median(), inplace=True)
data['audio'].fillna(data['audio'].mean(), inplace=True)
data['battery'].fillna(data['battery'].mean(), inplace=True)
data['display'].fillna(data['display'].median(), inplace=True)
data.isnull().sum()
data2 = data[['price', 'camera', 'battery']]
X = data2.drop(columns=['price'])
y = data2['price']
y.head() | code |
90150886/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape | code |
90150886/cell_34 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum()
data['camera'].fillna(data['camera'].mean(), inplace=True)
data['selfie'].fillna(data['selfie'].median(), inplace=True)
data['audio'].fillna(data['audio'].mean(), inplace=True)
data['battery'].fillna(data['battery'].mean(), inplace=True)
data['display'].fillna(data['display'].median(), inplace=True)
data.isnull().sum()
data2 = data[['price', 'camera', 'battery']]
X = data2.drop(columns=['price'])
y = data2['price']
model = LinearRegression()
model.fit(X.values, y)
model.score(X.values, y)
def function(x, a):
    f = a[2] * x * x + a[1] * x + a[0]
    return f

def grad(x, a):
    g = 2 * a[2] * x + a[1]
    return g
X = data2.drop(columns=['price'])
y = data2['price']
f = function(X, y)
plt.scatter(X, f)
plt.plot(X, f)
plt.xlabel('X')
plt.ylabel('f(X)') | code |
90150886/cell_30 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum()
data['camera'].fillna(data['camera'].mean(), inplace=True)
data['selfie'].fillna(data['selfie'].median(), inplace=True)
data['audio'].fillna(data['audio'].mean(), inplace=True)
data['battery'].fillna(data['battery'].mean(), inplace=True)
data['display'].fillna(data['display'].median(), inplace=True)
data.isnull().sum()
data2 = data[['price', 'camera', 'battery']]
X = data2.drop(columns=['price'])
y = data2['price']
model = LinearRegression()
model.fit(X.values, y)
model.score(X.values, y)
model.intercept_
model.predict([[130.0, 80.0]]) | code |
90150886/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum()
data['camera'].fillna(data['camera'].mean(), inplace=True)
data['selfie'].fillna(data['selfie'].median(), inplace=True)
data['audio'].fillna(data['audio'].mean(), inplace=True)
data['battery'].fillna(data['battery'].mean(), inplace=True)
data['display'].fillna(data['display'].median(), inplace=True)
data.isnull().sum()
data2 = data[['price', 'camera', 'battery']]
sns.lmplot(x='battery', y='price', data=data2, ci=None) | code |
90150886/cell_29 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum()
data['camera'].fillna(data['camera'].mean(), inplace=True)
data['selfie'].fillna(data['selfie'].median(), inplace=True)
data['audio'].fillna(data['audio'].mean(), inplace=True)
data['battery'].fillna(data['battery'].mean(), inplace=True)
data['display'].fillna(data['display'].median(), inplace=True)
data.isnull().sum()
data2 = data[['price', 'camera', 'battery']]
X = data2.drop(columns=['price'])
y = data2['price']
model = LinearRegression()
model.fit(X.values, y)
model.score(X.values, y)
model.intercept_ | code |
90150886/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum()
data['camera'].fillna(data['camera'].mean(), inplace=True)
data['selfie'].fillna(data['selfie'].median(), inplace=True)
data['audio'].fillna(data['audio'].mean(), inplace=True)
data['battery'].fillna(data['battery'].mean(), inplace=True)
data['display'].fillna(data['display'].median(), inplace=True)
data.isnull().sum() | code |
90150886/cell_19 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum()
data['camera'].fillna(data['camera'].mean(), inplace=True)
data['selfie'].fillna(data['selfie'].median(), inplace=True)
data['audio'].fillna(data['audio'].mean(), inplace=True)
data['battery'].fillna(data['battery'].mean(), inplace=True)
data['display'].fillna(data['display'].median(), inplace=True)
data.isnull().sum()
data2 = data[['price', 'camera', 'battery']]
sns.lmplot(x='camera', y='price', data=data2, ci=None) | code |
90150886/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.head() | code |
90150886/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum()
data['camera'].fillna(data['camera'].mean(), inplace=True)
data['selfie'].fillna(data['selfie'].median(), inplace=True)
data['audio'].fillna(data['audio'].mean(), inplace=True)
data['battery'].fillna(data['battery'].mean(), inplace=True)
data['display'].fillna(data['display'].median(), inplace=True)
data.isnull().sum()
data2 = data[['price', 'camera', 'battery']]
X = data2.drop(columns=['price'])
y = data2['price']
model = LinearRegression()
model.fit(X.values, y)
model.score(X.values, y)
model.intercept_
model.predict([[130.0, 80.0]])
model.coef_ | code |
90150886/cell_28 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum()
data['camera'].fillna(data['camera'].mean(), inplace=True)
data['selfie'].fillna(data['selfie'].median(), inplace=True)
data['audio'].fillna(data['audio'].mean(), inplace=True)
data['battery'].fillna(data['battery'].mean(), inplace=True)
data['display'].fillna(data['display'].median(), inplace=True)
data.isnull().sum()
data2 = data[['price', 'camera', 'battery']]
X = data2.drop(columns=['price'])
y = data2['price']
model = LinearRegression()
model.fit(X.values, y)
model.score(X.values, y) | code |
90150886/cell_8 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum() | code |
90150886/cell_15 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum()
data['camera'].fillna(data['camera'].mean(), inplace=True)
data['selfie'].fillna(data['selfie'].median(), inplace=True)
data['audio'].fillna(data['audio'].mean(), inplace=True)
data['battery'].fillna(data['battery'].mean(), inplace=True)
data['display'].fillna(data['display'].median(), inplace=True)
data.isnull().sum()
plt.figure(figsize=(16, 6))
plt.title('Distribution of Mobile Prices', size=15, color='black')
plt.xlabel('Price $', fontsize=15)
plt.ylabel('Density', fontsize=15)
sns.distplot(data['price'], color='blue')
plt.grid(True)
plt.show() | code |
90150886/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.head(10) | code |
90150886/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum()
data['camera'].fillna(data['camera'].mean(), inplace=True)
data['selfie'].fillna(data['selfie'].median(), inplace=True)
data['audio'].fillna(data['audio'].mean(), inplace=True)
data['battery'].fillna(data['battery'].mean(), inplace=True)
data['display'].fillna(data['display'].median(), inplace=True)
data.isnull().sum()
data2 = data[['price', 'camera', 'battery']]
data2.head() | code |
90150886/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum()
data['camera'].fillna(data['camera'].mean(), inplace=True)
data['selfie'].fillna(data['selfie'].median(), inplace=True)
data['audio'].fillna(data['audio'].mean(), inplace=True)
data['battery'].fillna(data['battery'].mean(), inplace=True)
data['display'].fillna(data['display'].median(), inplace=True)
data.isnull().sum()
data2 = data[['price', 'camera', 'battery']]
X = data2.drop(columns=['price'])
y = data2['price']
X.head() | code |
90150886/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum()
data['camera'].fillna(data['camera'].mean(), inplace=True)
data['selfie'].fillna(data['selfie'].median(), inplace=True)
data['audio'].fillna(data['audio'].mean(), inplace=True)
data['battery'].fillna(data['battery'].mean(), inplace=True)
data['display'].fillna(data['display'].median(), inplace=True)
data.head() | code |
90150886/cell_27 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LinearRegression
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum()
data['camera'].fillna(data['camera'].mean(), inplace=True)
data['selfie'].fillna(data['selfie'].median(), inplace=True)
data['audio'].fillna(data['audio'].mean(), inplace=True)
data['battery'].fillna(data['battery'].mean(), inplace=True)
data['display'].fillna(data['display'].median(), inplace=True)
data.isnull().sum()
data2 = data[['price', 'camera', 'battery']]
X = data2.drop(columns=['price'])
y = data2['price']
model = LinearRegression()
model.fit(X.values, y) | code |
90150886/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes | code |
90150886/cell_36 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/mobile-phone-rating/mobile phone rating by dxo.csv', parse_dates=True)
data.shape
data.dtypes
data['price'] = data['price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
data['price'] = data['price'].apply(lambda x: float(x))
data.isnull().sum()
data['camera'].fillna(data['camera'].mean(), inplace=True)
data['selfie'].fillna(data['selfie'].median(), inplace=True)
data['audio'].fillna(data['audio'].mean(), inplace=True)
data['battery'].fillna(data['battery'].mean(), inplace=True)
data['display'].fillna(data['display'].median(), inplace=True)
data.isnull().sum()
data2 = data[['price', 'camera', 'battery']]
X = data2.drop(columns=['price'])
y = data2['price']
model = LinearRegression()
model.fit(X.values, y)
model.score(X.values, y)
def function(x, a):
    f = a[2] * x * x + a[1] * x + a[0]
    return f

def grad(x, a):
    g = 2 * a[2] * x + a[1]
    return g
X = data2.drop(columns=['price'])
y = data2['price']
f = function(X, y)
x = data[['battery']]
y = data['price']
plt.plot(x, y, 'r.') | code |
73097219/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
def submit(model, test_features, test_ids, filename):
    loss_pred = model.predict(test_features)
    submission = pd.DataFrame({'id': test_ids, 'loss': loss_pred.reshape(-1)})
    submission.to_csv(filename, index=False)
train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
train.head() | code |
73097219/cell_25 | [
"text_plain_output_1.png"
] | import pandas as pd
def submit(model, test_features, test_ids, filename):
    loss_pred = model.predict(test_features)
    submission = pd.DataFrame({'id': test_ids, 'loss': loss_pred.reshape(-1)})
    submission.to_csv(filename, index=False)
train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
train.shape
corr_score = train.corr()
train.pop('id')
test_ids = test.pop('id')
train_mean = train.mean()
train_std = train.std()
train_targets_mean = train_mean.pop('loss')
train_targets_std = train_std.pop('loss')
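# (assumed) the original split cell was not captured; a plausible hold-out split that defines the frames used below
from sklearn.model_selection import train_test_split
train_features, validation_features = train_test_split(train, test_size=0.2, random_state=42)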
train_targets, validation_targets = (train_features.pop('loss'), validation_features.pop('loss'))
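# Standardise features using the training mean and std (z-score scaling)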
should_scale = True
if should_scale:
    train_features = (train_features - train_mean) / train_std
    validation_features = (validation_features - train_mean) / train_std
    test_features = (test - train_mean) / train_std
print(test_features.head())
print(train_features.head())
print(validation_features.head()) | code |
73097219/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
def submit(model, test_features, test_ids, filename):
    loss_pred = model.predict(test_features)
    submission = pd.DataFrame({'id': test_ids, 'loss': loss_pred.reshape(-1)})
    submission.to_csv(filename, index=False)
train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
train.shape
train.describe().transpose() | code |
73097219/cell_28 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
import catboost
import numpy as np
import pandas as pd
import time
def submit(model, test_features, test_ids, filename):
    loss_pred = model.predict(test_features)
    submission = pd.DataFrame({'id': test_ids, 'loss': loss_pred.reshape(-1)})
    submission.to_csv(filename, index=False)
train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
train.shape
corr_score = train.corr()
train.pop('id')
test_ids = test.pop('id')
train_mean = train.mean()
train_std = train.std()
train_targets_mean = train_mean.pop('loss')
train_targets_std = train_std.pop('loss')
train_targets, validation_targets = (train_features.pop('loss'), validation_features.pop('loss'))
should_scale = True
if should_scale:
    train_features = (train_features - train_mean) / train_std
    validation_features = (validation_features - train_mean) / train_std
    test_features = (test - train_mean) / train_std
import catboost
import time
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
begin = time.time()
parameters = {'depth': [6, 7, 8], 'learning_rate': [0.08, 0.1], 'iterations': [300, 350]}
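# Greedy coordinate search: tune one hyperparameter at a time, holding the best-so-far values for the others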
def train_catboost(hyperparameters, X_train, X_val, y_train, y_val):
    keys = hyperparameters.keys()
    best_index = {key: 0 for key in keys}
    best_cat = None
    best_score = 1000000000.0
    for index, key in enumerate(keys):
        items = hyperparameters[key]
        best_parameter = None
        temp_best = 1000000000.0
        for key_index, item in enumerate(items):
            iterations = hyperparameters['iterations'][best_index['iterations']] if key != 'iterations' else item
            learning_rate = hyperparameters['learning_rate'][best_index['learning_rate']] if key != 'learning_rate' else item
            depth = hyperparameters['depth'][best_index['depth']] if key != 'depth' else item
            cat = catboost.CatBoostRegressor(iterations=iterations, learning_rate=learning_rate, depth=depth)
            cat.fit(X_train, y_train, verbose=False)
            y_pred = cat.predict(X_val)
            score = np.sqrt(mean_squared_error(y_val, y_pred))
            if score < temp_best:
                temp_best = score
                best_index[key] = key_index
                best_parameter = item
            if score < best_score:
                best_score = score
                best_cat = cat
    best_parameters = {'iterations': hyperparameters['iterations'][best_index['iterations']], 'learning_rate': hyperparameters['learning_rate'][best_index['learning_rate']], 'depth': hyperparameters['depth'][best_index['depth']]}
    return (best_cat, best_score, best_parameters)
best_cat, best_score, best_parameters = train_catboost(parameters, train_features, validation_features, train_targets, validation_targets)
elapsed = time.time() - begin
submit(best_cat, test_features, test_ids, 'submission.csv')
from sklearn.model_selection import KFold
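# Retrain with the best hyperparameters on 5 shuffled folds, reporting validation RMSE and writing one submission per fold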
fold = 1
for train_indices, val_indices in KFold(n_splits=5, shuffle=True).split(train):
    print('Training with Fold %d' % fold)
    X_train = train.iloc[train_indices]
    X_val = train.iloc[val_indices]
    y_train = X_train.pop('loss')
    y_val = X_val.pop('loss')
    X_train = (X_train - train_mean) / train_std
    X_val = (X_val - train_mean) / train_std
    cat = catboost.CatBoostRegressor(iterations=best_parameters['iterations'], learning_rate=best_parameters['learning_rate'], depth=best_parameters['depth'])
    cat.fit(X_train, y_train, verbose=False)
    y_pred = cat.predict(X_val)
    score = np.sqrt(mean_squared_error(y_val, y_pred))
    print('RMSE: %.2f' % score)
    submit(cat, test_features, test_ids, 'submission_fold%d.csv' % fold)
    fold += 1 | code |
73097219/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
def submit(model, test_features, test_ids, filename):
    loss_pred = model.predict(test_features)
    submission = pd.DataFrame({'id': test_ids, 'loss': loss_pred.reshape(-1)})
    submission.to_csv(filename, index=False)
train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
train.shape
corr_score = train.corr()
corr_score['loss'].sort_values(ascending=False) | code |
73097219/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
def submit(model, test_features, test_ids, filename):
    loss_pred = model.predict(test_features)
    submission = pd.DataFrame({'id': test_ids, 'loss': loss_pred.reshape(-1)})
    submission.to_csv(filename, index=False)
train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
train.shape | code |
73097219/cell_27 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_squared_error
import catboost
import numpy as np
import pandas as pd
import time
def submit(model, test_features, test_ids, filename):
    loss_pred = model.predict(test_features)
    submission = pd.DataFrame({'id': test_ids, 'loss': loss_pred.reshape(-1)})
    submission.to_csv(filename, index=False)
train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
train.shape
corr_score = train.corr()
train.pop('id')
test_ids = test.pop('id')
train_mean = train.mean()
train_std = train.std()
train_targets_mean = train_mean.pop('loss')
train_targets_std = train_std.pop('loss')
train_targets, validation_targets = (train_features.pop('loss'), validation_features.pop('loss'))
should_scale = True
if should_scale:
    train_features = (train_features - train_mean) / train_std
    validation_features = (validation_features - train_mean) / train_std
    test_features = (test - train_mean) / train_std
import catboost
import time
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
begin = time.time()
parameters = {'depth': [6, 7, 8], 'learning_rate': [0.08, 0.1], 'iterations': [300, 350]}
def train_catboost(hyperparameters, X_train, X_val, y_train, y_val):
    keys = hyperparameters.keys()
    best_index = {key: 0 for key in keys}
    best_cat = None
    best_score = 1000000000.0
    for index, key in enumerate(keys):
        print('Find best parameter for %s' % key)
        items = hyperparameters[key]
        best_parameter = None
        temp_best = 1000000000.0
        for key_index, item in enumerate(items):
            iterations = hyperparameters['iterations'][best_index['iterations']] if key != 'iterations' else item
            learning_rate = hyperparameters['learning_rate'][best_index['learning_rate']] if key != 'learning_rate' else item
            depth = hyperparameters['depth'][best_index['depth']] if key != 'depth' else item
            print('Train with iterations: %d learning_rate: %.2f depth:%d' % (iterations, learning_rate, depth))
            cat = catboost.CatBoostRegressor(iterations=iterations, learning_rate=learning_rate, depth=depth)
            cat.fit(X_train, y_train, verbose=False)
            y_pred = cat.predict(X_val)
            score = np.sqrt(mean_squared_error(y_val, y_pred))
            print('RMSE: %.2f' % score)
            if score < temp_best:
                temp_best = score
                best_index[key] = key_index
                best_parameter = item
            if score < best_score:
                best_score = score
                best_cat = cat
        print('Best Parameter for %s: ' % key, best_parameter)
    best_parameters = {'iterations': hyperparameters['iterations'][best_index['iterations']], 'learning_rate': hyperparameters['learning_rate'][best_index['learning_rate']], 'depth': hyperparameters['depth'][best_index['depth']]}
    return (best_cat, best_score, best_parameters)
best_cat, best_score, best_parameters = train_catboost(parameters, train_features, validation_features, train_targets, validation_targets)
print('Best CatBoost Model: ', best_cat)
print('Best RMSE: ', best_score)
elapsed = time.time() - begin
print('Elapsed time: ', elapsed)
submit(best_cat, test_features, test_ids, 'submission.csv') | code |
74053599/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/home-data-for-ml-course/train.csv')
test = pd.read_csv('/kaggle/input/home-data-for-ml-course/test.csv')
test.head() | code |
74053599/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/home-data-for-ml-course/train.csv')
test = pd.read_csv('/kaggle/input/home-data-for-ml-course/test.csv')
print(len(train))
print(len(test)) | code |
74053599/cell_11 | [
"text_html_output_1.png"
] | numerical_cols = [col for col in X_train_full.columns if X_train_full[col].dtype == 'int64' or X_train_full[col].dtype == 'float64']
print(len(numerical_cols)) | code |
74053599/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import OneHotEncoder
from xgboost import XGBRegressor
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/home-data-for-ml-course/train.csv')
test = pd.read_csv('/kaggle/input/home-data-for-ml-course/test.csv')
numerical_cols = [col for col in X_train_full.columns if X_train_full[col].dtype == 'int64' or X_train_full[col].dtype == 'float64']
categorical_cols_prev = [col for col in X_train_full.columns if X_train_full[col].dtype == 'object' and X_train_full[col].nunique() > 4]
categorical_cols = []
for col in categorical_cols_prev:
    if set(list(test[col].unique())).issubset(set(list(X_train_full[col].unique()))):
        categorical_cols.append(col)
X_train = X_train_full[numerical_cols + categorical_cols]
X_valid = X_valid_full[numerical_cols + categorical_cols]
X_test = test[numerical_cols + categorical_cols]
from sklearn.preprocessing import OneHotEncoder
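# handle_unknown='ignore' makes transform() encode unseen categories as all-zero rows instead of raising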
encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_X_train = pd.DataFrame(encoder.fit_transform(X_train[categorical_cols]))
OH_X_valid = pd.DataFrame(encoder.transform(X_valid[categorical_cols]))
OH_X_test = pd.DataFrame(encoder.transform(X_test[categorical_cols]))
OH_X_train.index = X_train.index
OH_X_valid.index = X_valid.index
OH_X_test.index = X_test.index
X_train_wna = X_train.drop(categorical_cols, axis=1)
X_valid_wna = X_valid.drop(categorical_cols, axis=1)
X_test_wna = X_test.drop(categorical_cols, axis=1)
X_train = pd.concat((X_train_wna, OH_X_train), axis=1)
X_valid = pd.concat((X_valid_wna, OH_X_valid), axis=1)
X_test = pd.concat((X_test_wna, OH_X_test), axis=1)
from xgboost import XGBRegressor
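# Early stopping halts boosting once validation MAE has not improved for 5 consecutive rounds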
model = XGBRegressor(learning_rate=0.05, n_estimators=1000)
model.fit(X_train, y_train, early_stopping_rounds=5, eval_set=[(X_valid, y_valid)], eval_metric='mae')
y_val_predicted = model.predict(X_valid)
from sklearn.metrics import mean_absolute_error
mae = mean_absolute_error(y_valid, y_val_predicted)
mae | code |
74053599/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
74053599/cell_7 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/home-data-for-ml-course/train.csv')
test = pd.read_csv('/kaggle/input/home-data-for-ml-course/test.csv')
print(len(train.columns))
print(len(test.columns)) | code |
74053599/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/home-data-for-ml-course/train.csv')
test = pd.read_csv('/kaggle/input/home-data-for-ml-course/test.csv')
df_na = train.isna().sum()
df_na = df_na[df_na > 0]
print(len(df_na)) | code |
74053599/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/home-data-for-ml-course/train.csv')
test = pd.read_csv('/kaggle/input/home-data-for-ml-course/test.csv')
numerical_cols = [col for col in X_train_full.columns if X_train_full[col].dtype == 'int64' or X_train_full[col].dtype == 'float64']
categorical_cols_prev = [col for col in X_train_full.columns if X_train_full[col].dtype == 'object' and X_train_full[col].nunique() > 4]
categorical_cols = []
for col in categorical_cols_prev:
    if set(list(test[col].unique())).issubset(set(list(X_train_full[col].unique()))):
        categorical_cols.append(col)
X_train = X_train_full[numerical_cols + categorical_cols]
X_valid = X_valid_full[numerical_cols + categorical_cols]
X_test = test[numerical_cols + categorical_cols]
impute_1 = SimpleImputer(strategy='most_frequent')
X_train[categorical_cols] = impute_1.fit_transform(X_train[categorical_cols])
X_valid[categorical_cols] = impute_1.transform(X_valid[categorical_cols])
X_test[categorical_cols] = impute_1.transform(X_test[categorical_cols]) | code |
74053599/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/home-data-for-ml-course/train.csv')
test = pd.read_csv('/kaggle/input/home-data-for-ml-course/test.csv')
train.head() | code |
74053599/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
from xgboost import XGBRegressor
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/home-data-for-ml-course/train.csv')
test = pd.read_csv('/kaggle/input/home-data-for-ml-course/test.csv')
numerical_cols = [col for col in X_train_full.columns if X_train_full[col].dtype == 'int64' or X_train_full[col].dtype == 'float64']
categorical_cols_prev = [col for col in X_train_full.columns if X_train_full[col].dtype == 'object' and X_train_full[col].nunique() > 4]
categorical_cols = []
for col in categorical_cols_prev:
    if set(list(test[col].unique())).issubset(set(list(X_train_full[col].unique()))):
        categorical_cols.append(col)
X_train = X_train_full[numerical_cols + categorical_cols]
X_valid = X_valid_full[numerical_cols + categorical_cols]
X_test = test[numerical_cols + categorical_cols]
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_X_train = pd.DataFrame(encoder.fit_transform(X_train[categorical_cols]))
OH_X_valid = pd.DataFrame(encoder.transform(X_valid[categorical_cols]))
OH_X_test = pd.DataFrame(encoder.transform(X_test[categorical_cols]))
OH_X_train.index = X_train.index
OH_X_valid.index = X_valid.index
OH_X_test.index = X_test.index
X_train_wna = X_train.drop(categorical_cols, axis=1)
X_valid_wna = X_valid.drop(categorical_cols, axis=1)
X_test_wna = X_test.drop(categorical_cols, axis=1)
X_train = pd.concat((X_train_wna, OH_X_train), axis=1)
X_valid = pd.concat((X_valid_wna, OH_X_valid), axis=1)
X_test = pd.concat((X_test_wna, OH_X_test), axis=1)
from xgboost import XGBRegressor
model = XGBRegressor(learning_rate=0.05, n_estimators=1000)
model.fit(X_train, y_train, early_stopping_rounds=5, eval_set=[(X_valid, y_valid)], eval_metric='mae') | code |
74053599/cell_14 | [
"text_html_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/home-data-for-ml-course/train.csv')
test = pd.read_csv('/kaggle/input/home-data-for-ml-course/test.csv')
numerical_cols = [col for col in X_train_full.columns if X_train_full[col].dtype == 'int64' or X_train_full[col].dtype == 'float64']
categorical_cols_prev = [col for col in X_train_full.columns if X_train_full[col].dtype == 'object' and X_train_full[col].nunique() > 4]
categorical_cols = []
for col in categorical_cols_prev:
    if set(list(test[col].unique())).issubset(set(list(X_train_full[col].unique()))):
        categorical_cols.append(col)
X_train = X_train_full[numerical_cols + categorical_cols]
X_valid = X_valid_full[numerical_cols + categorical_cols]
X_test = test[numerical_cols + categorical_cols]
from sklearn.impute import SimpleImputer
impute = SimpleImputer(strategy='mean')
X_train[numerical_cols] = impute.fit_transform(X_train[numerical_cols])
X_valid[numerical_cols] = impute.transform(X_valid[numerical_cols])
X_test[numerical_cols] = impute.transform(X_test[numerical_cols]) | code |
74053599/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/home-data-for-ml-course/train.csv')
test = pd.read_csv('/kaggle/input/home-data-for-ml-course/test.csv')
numerical_cols = [col for col in X_train_full.columns if X_train_full[col].dtype == 'int64' or X_train_full[col].dtype == 'float64']
categorical_cols_prev = [col for col in X_train_full.columns if X_train_full[col].dtype == 'object' and X_train_full[col].nunique() > 4]
categorical_cols = []
for col in categorical_cols_prev:
    if set(list(test[col].unique())).issubset(set(list(X_train_full[col].unique()))):
        categorical_cols.append(col)
print(len(categorical_cols)) | code |
74053599/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/home-data-for-ml-course/train.csv')
test = pd.read_csv('/kaggle/input/home-data-for-ml-course/test.csv')
train.describe() | code |
74055991/cell_13 | [
"image_output_1.png"
] | path = '/kaggle/input/cell-images-for-detecting-malaria/cell_images/'
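# Labels come from each image's parent folder; 20% of files form a random validation split; training uses 114 px random resized crops plus fastai's default augmentations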
fields = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, get_y=parent_label, splitter=RandomSplitter(valid_pct=0.2, seed=42), item_tfms=RandomResizedCrop(114, min_scale=0.5), batch_tfms=aug_transforms())
dls = fields.dataloaders(path)
dls.vocab
learn = cnn_learner(dls, resnet34, metrics=[error_rate, accuracy])
lrn_min, lrn_steep = learn.lr_find()
learn.fine_tune(1)
learn = cnn_learner(dls, resnet50, metrics=[error_rate, accuracy])
lrn_min, lrn_steep = learn.lr_find() | code |
74055991/cell_9 | [
"text_html_output_2.png",
"text_html_output_1.png"
] | path = '/kaggle/input/cell-images-for-detecting-malaria/cell_images/'
fields = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, get_y=parent_label, splitter=RandomSplitter(valid_pct=0.2, seed=42), item_tfms=RandomResizedCrop(114, min_scale=0.5), batch_tfms=aug_transforms())
dls = fields.dataloaders(path)
dls.vocab
learn = cnn_learner(dls, resnet34, metrics=[error_rate, accuracy])
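# lr_find runs a short learning-rate range test and returns suggested values (minimum and steepest-slope points)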
lrn_min, lrn_steep = learn.lr_find() | code |
74055991/cell_6 | [
"image_output_1.png"
] | path = '/kaggle/input/cell-images-for-detecting-malaria/cell_images/'
fields = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, get_y=parent_label, splitter=RandomSplitter(valid_pct=0.2, seed=42), item_tfms=RandomResizedCrop(114, min_scale=0.5), batch_tfms=aug_transforms())
dls = fields.dataloaders(path)
dls.show_batch() | code |
74055991/cell_11 | [
"text_plain_output_1.png"
] | path = '/kaggle/input/cell-images-for-detecting-malaria/cell_images/'
fields = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, get_y=parent_label, splitter=RandomSplitter(valid_pct=0.2, seed=42), item_tfms=RandomResizedCrop(114, min_scale=0.5), batch_tfms=aug_transforms())
dls = fields.dataloaders(path)
dls.vocab
learn = cnn_learner(dls, resnet34, metrics=[error_rate, accuracy])
lrn_min, lrn_steep = learn.lr_find()
learn.fine_tune(1)
learn.show_results() | code |
74055991/cell_7 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | path = '/kaggle/input/cell-images-for-detecting-malaria/cell_images/'
fields = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, get_y=parent_label, splitter=RandomSplitter(valid_pct=0.2, seed=42), item_tfms=RandomResizedCrop(114, min_scale=0.5), batch_tfms=aug_transforms())
dls = fields.dataloaders(path)
dls.vocab | code |
74055991/cell_8 | [
"image_output_1.png"
] | path = '/kaggle/input/cell-images-for-detecting-malaria/cell_images/'
fields = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, get_y=parent_label, splitter=RandomSplitter(valid_pct=0.2, seed=42), item_tfms=RandomResizedCrop(114, min_scale=0.5), batch_tfms=aug_transforms())
dls = fields.dataloaders(path)
dls.vocab
learn = cnn_learner(dls, resnet34, metrics=[error_rate, accuracy]) | code |
74055991/cell_14 | [
"text_html_output_2.png",
"text_html_output_1.png"
] | path = '/kaggle/input/cell-images-for-detecting-malaria/cell_images/'
fields = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, get_y=parent_label, splitter=RandomSplitter(valid_pct=0.2, seed=42), item_tfms=RandomResizedCrop(114, min_scale=0.5), batch_tfms=aug_transforms())
dls = fields.dataloaders(path)
dls.vocab
learn = cnn_learner(dls, resnet34, metrics=[error_rate, accuracy])
lrn_min, lrn_steep = learn.lr_find()
learn.fine_tune(1)
learn = cnn_learner(dls, resnet50, metrics=[error_rate, accuracy])
lrn_min, lrn_steep = learn.lr_find()
learn.fine_tune(1) | code |
74055991/cell_10 | [
"image_output_1.png"
] | path = '/kaggle/input/cell-images-for-detecting-malaria/cell_images/'
fields = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, get_y=parent_label, splitter=RandomSplitter(valid_pct=0.2, seed=42), item_tfms=RandomResizedCrop(114, min_scale=0.5), batch_tfms=aug_transforms())
dls = fields.dataloaders(path)
dls.vocab
learn = cnn_learner(dls, resnet34, metrics=[error_rate, accuracy])
lrn_min, lrn_steep = learn.lr_find()
learn.fine_tune(1) | code |
74055991/cell_12 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | path = '/kaggle/input/cell-images-for-detecting-malaria/cell_images/'
fields = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, get_y=parent_label, splitter=RandomSplitter(valid_pct=0.2, seed=42), item_tfms=RandomResizedCrop(114, min_scale=0.5), batch_tfms=aug_transforms())
dls = fields.dataloaders(path)
dls.vocab
learn = cnn_learner(dls, resnet50, metrics=[error_rate, accuracy]) | code |
90129163/cell_63 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | print(f'Mean accuracy score: {accuracy}') | code |
90129163/cell_21 | [
"text_plain_output_1.png"
] | sub.sample(10) | code |
90129163/cell_81 | [
"text_plain_output_1.png"
] | import numpy as np
y_prob = sum(y_probs) / len(y_probs)
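# y_probs is assumed to hold per-fold probability predictions from the CV loop;
# averaging them is soft voting, and argmax below picks the most likely class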
y_prob_results = np.argmax(y_prob, axis=1)
y_prob_results = y_prob_results.astype('bool')
sub['Transported'] = y_prob_results
sub.to_csv('submission_twenty_fold_loop_03112022.csv', index=False) | code |
90129163/cell_13 | [
"text_plain_output_1.png"
] | trn_data.head() | code |
90129163/cell_25 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | trn_passenger_ids = set(trn_data['PassengerId'].unique())
tst_passenger_ids = set(tst_data['PassengerId'].unique())
intersection = trn_passenger_ids.intersection(tst_passenger_ids)
print('Overlapped Passengers:', len(intersection)) | code |
90129163/cell_4 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
90129163/cell_56 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
test_size_pct = 0.01
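# hold out just 1% here for a quick validation set; other cells in this notebook run 20-fold CV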
X_train, X_valid, y_train, y_valid = train_test_split(trn_data[features], trn_data[target_feature], test_size=test_size_pct, random_state=42) | code |
90129163/cell_34 | [
"text_plain_output_1.png"
] | trn_relatives = trn_relatives.rename(columns={'PassengerId': 'NumRelatives'})
tst_relatives = tst_relatives.rename(columns={'PassengerId': 'NumRelatives'}) | code |
90129163/cell_23 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | def analyse_categ_target(df, target='Transported'):
transported = df[df[target] == True].shape[0]
not_transported = df[df[target] == False].shape[0]
total = transported + not_transported
    print(f'Transported : {transported / total:.2%}')
    print(f'Not Transported : {not_transported / total:.2%}')
print(f'Total Passengers: {total}')
print('...') | code |
90129163/cell_79 | [
"text_plain_output_1.png"
] | print('Mean accuracy score:', np.array(scores).mean()) | code |
90129163/cell_30 | [
"text_plain_output_1.png"
] | trn_data = total_billed(trn_data)
tst_data = total_billed(tst_data) | code |
90129163/cell_33 | [
"text_plain_output_1.png"
] | trn_relatives = trn_data.groupby('FamilyName')['PassengerId'].count().reset_index()
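# count passengers per FamilyName as a family-size proxy; repeated for the test set below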
tst_relatives = tst_data.groupby('FamilyName')['PassengerId'].count().reset_index() | code |
90129163/cell_44 | [
"text_plain_output_1.png"
] | trn_data.head() | code |
90129163/cell_20 | [
"text_plain_output_1.png"
] | tst_data.isnull().sum() | code |
90129163/cell_55 | [
"text_plain_output_1.png"
] | features | code |
90129163/cell_6 | [
"text_plain_output_1.png"
] | import warnings
warnings.filterwarnings('ignore') | code |
90129163/cell_76 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import StratifiedKFold
N_SPLITS = 20
folds = StratifiedKFold(n_splits=N_SPLITS, shuffle=True) | code |
90129163/cell_29 | [
"text_plain_output_1.png"
] | def total_billed(df):
"""
    Calculates the total amount billed to the passenger for the trip.
    Args:
        df: DataFrame with RoomService, FoodCourt, ShoppingMall, Spa and VRDeck columns.
    Returns:
        The input DataFrame with an added Total_Billed column.
"""
df['Total_Billed'] = df['RoomService'] + df['FoodCourt'] + df['ShoppingMall'] + df['Spa'] + df['VRDeck']
return df | code |
90129163/cell_39 | [
"text_plain_output_1.png"
] | trn_data = route(trn_data)
tst_data = route(tst_data) | code |
90129163/cell_65 | [
"text_plain_output_1.png"
] | import numpy as np
import matplotlib.pyplot as plt
def feature_importance(clf):
importances = clf.feature_importances_
i = np.argsort(importances)
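    # indices sorted by ascending importance, so the most important feature is drawn at the top of the barh chart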
features = X_train.columns
plt.title('Feature Importance')
plt.barh(range(len(i)), importances[i], align='center')
plt.yticks(range(len(i)), [features[x] for x in i])
plt.xlabel('Scale')
plt.show() | code |
90129163/cell_48 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | trn_data, tst_data = encode_categorical(trn_data, tst_data, categorical_features) | code |
90129163/cell_73 | [
"text_plain_output_1.png"
] | code |
|
90129163/cell_41 | [
"text_plain_output_1.png"
] | trn_data = age_groups(trn_data)
tst_data = age_groups(tst_data) | code |
90129163/cell_61 | [
"text_plain_output_1.png"
] | from xgboost import XGBClassifier
cls = XGBClassifier(**param)
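# early_stopping_rounds halts boosting once validation logloss fails to improve for 128 rounds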
cls.fit(X_train, y_train, eval_set=[(X_valid, y_valid)], eval_metric=['logloss'], early_stopping_rounds=128, verbose=False) | code |
90129163/cell_54 | [
"text_plain_output_1.png"
] | remove = ['PassengerId', 'Route', 'FirstName_Enc', 'CabinNum_Enc', 'Transported']
features = [feat for feat in trn_data.columns if feat not in remove] | code |
90129163/cell_72 | [
"text_plain_output_1.png"
] | code |
|
90129163/cell_67 | [
"text_plain_output_1.png"
] | preds = cls.predict(tst_data[features]) | code |
90129163/cell_60 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | param = {'learning_rate': 0.05, 'n_estimators': 1024, 'n_jobs': -1, 'random_state': 42, 'objective': 'binary:logistic'} | code |
90129163/cell_19 | [
"text_plain_output_1.png"
] | tst_data.head() | code |
90129163/cell_7 | [
"text_plain_output_1.png"
] | DATA_ROWS = None
NROWS = 50
NCOLS = 15
BASE_PATH = '...' | code |
90129163/cell_18 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | trn_data.isnull().sum() | code |