path (stringlengths 13–17) | screenshot_names (sequencelengths 1–873) | code (stringlengths 0–40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
105193696/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_tracking.columns | code |
105193696/cell_23 | [
"text_plain_output_1.png"
] | import folium
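# These coordinates sit near Aqueduct Racetrack in Queens, NY, one of the NYRA tracks in this dataset.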
map = folium.Map(location=[40.672243, -73.827903])
map | code |
105193696/cell_30 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_start.columns
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_start
df_start.isnull().sum()
df_start['year'] = df_start['race_date'].apply(lambda x: int(x.split('-')[0]))  # re-derive 'year' after the reload above
df_start['year'] | code |
105193696/cell_33 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_start.columns
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_start
df_start.isnull().sum()
df_start['jockey'].value_counts() | code |
105193696/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_tracking.columns
df_tracking.isnull().sum()
df_tracking.duplicated().sum()
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_tracking['month'] = df_tracking['race_date'].apply(lambda x: int(x.split('-')[1]))  # re-derive 'month' after the reload above
df_tracking['month'].value_counts() | code |
105193696/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_race.columns | code |
105193696/cell_29 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_tracking.columns
df_start.columns
df_tracking.isnull().sum()
df_tracking.duplicated().sum()
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
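# race_date is an ISO 'YYYY-MM-DD' string, so year and month fall out of a simple split.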
df_tracking['year'] = df_tracking['race_date'].apply(lambda x: int(x.split('-')[0]))
df_tracking['year']
df_tracking['month'] = df_tracking['race_date'].apply(lambda x: int(x.split('-')[1]))
df_tracking['month']
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_start
df_start.isnull().sum()
df_start['year'] = df_start['race_date'].apply(lambda x: int(x.split('-')[0]))
df_start['year'] | code |
105193696/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_start.columns
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_start
df_start.info() | code |
105193696/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_tracking.columns
df_tracking.isnull().sum()
df_tracking.duplicated().sum()
df_tracking.head(10) | code |
105193696/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_tracking.columns
df_tracking.isnull().sum()
df_tracking.duplicated().sum()
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_tracking['year'] = df_tracking['race_date'].apply(lambda x: int(x.split('-')[0]))
df_tracking['year']
df_tracking['month'] = df_tracking['race_date'].apply(lambda x: int(x.split('-')[1]))
df_tracking['month'] | code |
105193696/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105193696/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_2019.columns | code |
105193696/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_tracking.columns
df_tracking.isnull().sum()
df_tracking.duplicated().sum()
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_tracking['year'] = df_tracking['race_date'].apply(lambda x: int(x.split('-')[0]))  # re-derive 'year' after the reload above
df_tracking['year'].value_counts() | code |
105193696/cell_32 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_start.columns
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_start
df_start.isnull().sum()
df_start['month'] = df_start['race_date'].apply(lambda x: int(x.split('-')[1]))  # re-derive 'month' after the reload above
df_start['month'].value_counts().plot(kind='bar', figsize=(16, 8)) | code |
105193696/cell_28 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_start.columns
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_start
df_start.isnull().sum() | code |
105193696/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_tracking.columns
df_tracking.info() | code |
105193696/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv') | code |
105193696/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_tracking.columns
df_tracking.isnull().sum()
df_tracking.duplicated().sum()
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_tracking['year'] = df_tracking['race_date'].apply(lambda x: int(x.split('-')[0]))
df_tracking['year'] | code |
105193696/cell_31 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_tracking.columns
df_start.columns
df_tracking.isnull().sum()
df_tracking.duplicated().sum()
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_tracking['year'] = df_tracking['race_date'].apply(lambda x: int(x.split('-')[0]))
df_tracking['year']
df_tracking['month'] = df_tracking['race_date'].apply(lambda x: int(x.split('-')[1]))
df_tracking['month']
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_start
df_start.isnull().sum()
df_start['year'] = df_start['race_date'].apply(lambda x: int(x.split('-')[0]))
df_start['year']
df_start['month'] = df_start['race_date'].apply(lambda x: int(x.split('-')[1]))
df_start['month'] | code |
105193696/cell_22 | [
"text_plain_output_1.png"
] | !pip install folium | code |
105193696/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_tracking.columns
df_tracking.isnull().sum()
df_tracking.duplicated().sum() | code |
105193696/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_start.columns
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_start
df_start.describe(include='all') | code |
105193696/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_tracking.columns
df_tracking.isnull().sum()
df_tracking.duplicated().sum()
df_tracking['track_id'].value_counts() | code |
105193696/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
df_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
df_2019 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_2019_complete.csv')
df_start.columns | code |
17118187/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from keras.layers import Input, Dense, GlobalAveragePooling2D, Dropout
from keras.models import Sequential, Model
from keras.optimizers import RMSprop, Adam, SGD
from keras.preprocessing.image import ImageDataGenerator
from math import ceil
from sklearn.model_selection import train_test_split
import numpy as np
import os
import pandas as pd
DATA_PATH = '../input/aptos2019-blindness-detection'
TRAIN_IMG_PATH = os.path.join(DATA_PATH, 'train_images')
TEST_IMG_PATH = os.path.join(DATA_PATH, 'test_images')
TRAIN_LABEL_PATH = os.path.join(DATA_PATH, 'train.csv')
TEST_LABEL_PATH = os.path.join(DATA_PATH, 'test.csv')
df_train = pd.read_csv(TRAIN_LABEL_PATH)
df_test = pd.read_csv(TEST_LABEL_PATH)
df_train['diagnosis'] = df_train['diagnosis'].astype('str')
df_train = df_train[['id_code', 'diagnosis']]
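# flow_from_dataframe matches rows to image files by name, so append the '.png' extension if it is missing.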
if df_train['id_code'][0].split('.')[-1] != 'png':
for index in range(len(df_train['id_code'])):
df_train['id_code'][index] = df_train['id_code'][index] + '.png'
df_test = df_test[['id_code']]
if df_test['id_code'][0].split('.')[-1] != 'png':
for index in range(len(df_test['id_code'])):
df_test['id_code'][index] = df_test['id_code'][index] + '.png'
train_data = np.arange(df_train.shape[0])
train_idx, val_idx = train_test_split(train_data, train_size=0.8, random_state=2019)
X_train = df_train.iloc[train_idx, :]
X_val = df_train.iloc[val_idx, :]
X_test = df_test
num_classes = 5
img_size = (299, 299, 3)
nb_train_samples = len(X_train)
nb_validation_samples = len(X_val)
nb_test_samples = len(X_test)
epochs = 50
batch_size = 32
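# Augment only the training images; validation and test images are just rescaled to [0, 1].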
train_datagen = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True, vertical_flip=True, width_shift_range=0.1, height_shift_range=0.1, brightness_range=[0.5, 1.5])
val_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(dataframe=X_train, directory=TRAIN_IMG_PATH, x_col='id_code', y_col='diagnosis', target_size=img_size[:2], color_mode='rgb', class_mode='categorical', batch_size=batch_size, seed=2019)
validation_generator = val_datagen.flow_from_dataframe(dataframe=X_val, directory=TRAIN_IMG_PATH, x_col='id_code', y_col='diagnosis', target_size=img_size[:2], color_mode='rgb', class_mode='categorical', batch_size=batch_size, shuffle=False)
test_generator = test_datagen.flow_from_dataframe(dataframe=X_test, directory=TEST_IMG_PATH, x_col='id_code', y_col=None, target_size=img_size[:2], color_mode='rgb', class_mode=None, batch_size=batch_size, shuffle=False)
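# Transfer learning: an InceptionV3 backbone (notop weights loaded from a local file) topped with a new GAP / Dense(1024) / softmax head.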
def get_model(file_path, input_shape, num_classes):
input_tensor = Input(shape=input_shape)
base_model = InceptionV3(include_top=False, weights=None, input_tensor=input_tensor)
base_model.load_weights(filepath=file_path)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.25)(x)
output_tensor = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=input_tensor, outputs=output_tensor)
optimizer = Adam(lr=0.0001)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
return model
model_path = '../input/inceptionv3/'
weight_file = 'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
model = get_model(file_path=os.path.join(model_path, weight_file), input_shape=img_size, num_classes=num_classes)
LOG_DIR = './logs'
if not os.path.isdir(LOG_DIR):
os.mkdir(LOG_DIR)
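# Checkpoint the best weights, cut the learning rate on plateaus, and stop early, all monitoring val_loss.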
CKPT_PATH = LOG_DIR + '/checkpoint-{epoch:02d}-{val_loss:.4f}.hdf5'
checkPoint = ModelCheckpoint(filepath=CKPT_PATH, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
reduceLROnPlateau = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=1e-06, verbose=1, mode='min')
earlyStopping = EarlyStopping(monitor='val_loss', patience=15, verbose=1, mode='min')
history = model.fit_generator(train_generator, steps_per_epoch=ceil(nb_train_samples / batch_size), epochs=epochs, validation_data=validation_generator, validation_steps=ceil(nb_validation_samples / batch_size), callbacks=[checkPoint, reduceLROnPlateau, earlyStopping], verbose=2) | code |
17118187/cell_9 | [
"text_plain_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
import numpy as np
import os
import pandas as pd
DATA_PATH = '../input/aptos2019-blindness-detection'
TRAIN_IMG_PATH = os.path.join(DATA_PATH, 'train_images')
TEST_IMG_PATH = os.path.join(DATA_PATH, 'test_images')
TRAIN_LABEL_PATH = os.path.join(DATA_PATH, 'train.csv')
TEST_LABEL_PATH = os.path.join(DATA_PATH, 'test.csv')
df_train = pd.read_csv(TRAIN_LABEL_PATH)
df_test = pd.read_csv(TEST_LABEL_PATH)
df_train['diagnosis'] = df_train['diagnosis'].astype('str')
df_train = df_train[['id_code', 'diagnosis']]
if df_train['id_code'][0].split('.')[-1] != 'png':
for index in range(len(df_train['id_code'])):
df_train['id_code'][index] = df_train['id_code'][index] + '.png'
df_test = df_test[['id_code']]
if df_test['id_code'][0].split('.')[-1] != 'png':
for index in range(len(df_test['id_code'])):
df_test['id_code'][index] = df_test['id_code'][index] + '.png'
train_data = np.arange(df_train.shape[0])
train_idx, val_idx = train_test_split(train_data, train_size=0.8, random_state=2019)
X_train = df_train.iloc[train_idx, :]
X_val = df_train.iloc[val_idx, :]
X_test = df_test
num_classes = 5
img_size = (299, 299, 3)
nb_train_samples = len(X_train)
nb_validation_samples = len(X_val)
nb_test_samples = len(X_test)
epochs = 50
batch_size = 32
train_datagen = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True, vertical_flip=True, width_shift_range=0.1, height_shift_range=0.1, brightness_range=[0.5, 1.5])
val_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(dataframe=X_train, directory=TRAIN_IMG_PATH, x_col='id_code', y_col='diagnosis', target_size=img_size[:2], color_mode='rgb', class_mode='categorical', batch_size=batch_size, seed=2019)
validation_generator = val_datagen.flow_from_dataframe(dataframe=X_val, directory=TRAIN_IMG_PATH, x_col='id_code', y_col='diagnosis', target_size=img_size[:2], color_mode='rgb', class_mode='categorical', batch_size=batch_size, shuffle=False)
test_generator = test_datagen.flow_from_dataframe(dataframe=X_test, directory=TEST_IMG_PATH, x_col='id_code', y_col=None, target_size=img_size[:2], color_mode='rgb', class_mode=None, batch_size=batch_size, shuffle=False) | code |
17118187/cell_4 | [
"image_output_1.png"
] | import os
import pandas as pd
DATA_PATH = '../input/aptos2019-blindness-detection'
TRAIN_IMG_PATH = os.path.join(DATA_PATH, 'train_images')
TEST_IMG_PATH = os.path.join(DATA_PATH, 'test_images')
TRAIN_LABEL_PATH = os.path.join(DATA_PATH, 'train.csv')
TEST_LABEL_PATH = os.path.join(DATA_PATH, 'test.csv')
df_train = pd.read_csv(TRAIN_LABEL_PATH)
df_test = pd.read_csv(TEST_LABEL_PATH)
print('num of train images ', len(os.listdir(TRAIN_IMG_PATH)))
print('num of test images ', len(os.listdir(TEST_IMG_PATH))) | code |
17118187/cell_2 | [
"text_plain_output_1.png"
] | import os
import sys
import numpy as np
import pandas as pd
import cv2
import seaborn as sns
from math import ceil
from tqdm import tqdm
from PIL import Image
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Input, Dense, GlobalAveragePooling2D, Dropout
from keras.optimizers import RMSprop, Adam, SGD
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping | code |
17118187/cell_19 | [
"text_plain_output_1.png"
] | from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from keras.layers import Input, Dense, GlobalAveragePooling2D, Dropout
from keras.models import Sequential, Model
from keras.optimizers import RMSprop, Adam, SGD
from keras.preprocessing.image import ImageDataGenerator
from math import ceil
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import numpy as np
import os
import pandas as pd
import seaborn as sns
DATA_PATH = '../input/aptos2019-blindness-detection'
TRAIN_IMG_PATH = os.path.join(DATA_PATH, 'train_images')
TEST_IMG_PATH = os.path.join(DATA_PATH, 'test_images')
TRAIN_LABEL_PATH = os.path.join(DATA_PATH, 'train.csv')
TEST_LABEL_PATH = os.path.join(DATA_PATH, 'test.csv')
df_train = pd.read_csv(TRAIN_LABEL_PATH)
df_test = pd.read_csv(TEST_LABEL_PATH)
df_train['diagnosis'] = df_train['diagnosis'].astype('str')
df_train = df_train[['id_code', 'diagnosis']]
if df_train['id_code'][0].split('.')[-1] != 'png':
for index in range(len(df_train['id_code'])):
df_train['id_code'][index] = df_train['id_code'][index] + '.png'
df_test = df_test[['id_code']]
if df_test['id_code'][0].split('.')[-1] != 'png':
for index in range(len(df_test['id_code'])):
df_test['id_code'][index] = df_test['id_code'][index] + '.png'
train_data = np.arange(df_train.shape[0])
train_idx, val_idx = train_test_split(train_data, train_size=0.8, random_state=2019)
X_train = df_train.iloc[train_idx, :]
X_val = df_train.iloc[val_idx, :]
X_test = df_test
num_classes = 5
img_size = (299, 299, 3)
nb_train_samples = len(X_train)
nb_validation_samples = len(X_val)
nb_test_samples = len(X_test)
epochs = 50
batch_size = 32
train_datagen = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True, vertical_flip=True, width_shift_range=0.1, height_shift_range=0.1, brightness_range=[0.5, 1.5])
val_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(dataframe=X_train, directory=TRAIN_IMG_PATH, x_col='id_code', y_col='diagnosis', target_size=img_size[:2], color_mode='rgb', class_mode='categorical', batch_size=batch_size, seed=2019)
validation_generator = val_datagen.flow_from_dataframe(dataframe=X_val, directory=TRAIN_IMG_PATH, x_col='id_code', y_col='diagnosis', target_size=img_size[:2], color_mode='rgb', class_mode='categorical', batch_size=batch_size, shuffle=False)
test_generator = test_datagen.flow_from_dataframe(dataframe=X_test, directory=TEST_IMG_PATH, x_col='id_code', y_col=None, target_size=img_size[:2], color_mode='rgb', class_mode=None, batch_size=batch_size, shuffle=False)
def get_model(file_path, input_shape, num_classes):
input_tensor = Input(shape=input_shape)
base_model = InceptionV3(include_top=False, weights=None, input_tensor=input_tensor)
base_model.load_weights(filepath=file_path)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.25)(x)
output_tensor = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=input_tensor, outputs=output_tensor)
optimizer = Adam(lr=0.0001)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
return model
model_path = '../input/inceptionv3/'
weight_file = 'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
model = get_model(file_path=os.path.join(model_path, weight_file), input_shape=img_size, num_classes=num_classes)
LOG_DIR = './logs'
if not os.path.isdir(LOG_DIR):
os.mkdir(LOG_DIR)
CKPT_PATH = LOG_DIR + '/checkpoint-{epoch:02d}-{val_loss:.4f}.hdf5'
checkPoint = ModelCheckpoint(filepath=CKPT_PATH, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
reduceLROnPlateau = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=1e-06, verbose=1, mode='min')
earlyStopping = EarlyStopping(monitor='val_loss', patience=15, verbose=1, mode='min')
history = model.fit_generator(train_generator, steps_per_epoch=ceil(nb_train_samples / batch_size), epochs=epochs, validation_data=validation_generator, validation_steps=ceil(nb_validation_samples / batch_size), callbacks=[checkPoint, reduceLROnPlateau, earlyStopping], verbose=2)
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
log_dir_list = os.listdir(LOG_DIR)
ckpt_list = []
for file in log_dir_list:
if file.split('-')[0] == 'checkpoint':
ckpt_list.append(file)
loss_list = []
for file in ckpt_list:
    file = file.split('-')[2]
    file = file[:-5]  # strip the '.hdf5' extension, leaving the formatted val_loss
    loss_list.append(float(file))  # compare losses numerically, not as strings
loss = ckpt_list[loss_list.index(min(loss_list))]
best_model = LOG_DIR + '/' + loss
model.load_weights(best_model)
test_generator.reset()
preds_tta = []
tta_steps = 10
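# Average the softmax outputs over tta_steps prediction passes; since test_datagen only rescales, the passes are deterministic here.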
for i in tqdm(range(tta_steps)):
preds = model.predict_generator(generator=test_generator, steps=ceil(nb_test_samples / batch_size))
preds_tta.append(preds)
preds_mean = np.mean(preds_tta, axis=0)
predicted_class_indices = np.argmax(preds_mean, axis=1) | code |
17118187/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.model_selection import train_test_split
import numpy as np
import os
import pandas as pd
DATA_PATH = '../input/aptos2019-blindness-detection'
TRAIN_IMG_PATH = os.path.join(DATA_PATH, 'train_images')
TEST_IMG_PATH = os.path.join(DATA_PATH, 'test_images')
TRAIN_LABEL_PATH = os.path.join(DATA_PATH, 'train.csv')
TEST_LABEL_PATH = os.path.join(DATA_PATH, 'test.csv')
df_train = pd.read_csv(TRAIN_LABEL_PATH)
df_test = pd.read_csv(TEST_LABEL_PATH)
df_train['diagnosis'] = df_train['diagnosis'].astype('str')
df_train = df_train[['id_code', 'diagnosis']]
if df_train['id_code'][0].split('.')[-1] != 'png':
for index in range(len(df_train['id_code'])):
df_train['id_code'][index] = df_train['id_code'][index] + '.png'
df_test = df_test[['id_code']]
if df_test['id_code'][0].split('.')[-1] != 'png':
for index in range(len(df_test['id_code'])):
df_test['id_code'][index] = df_test['id_code'][index] + '.png'
train_data = np.arange(df_train.shape[0])
train_idx, val_idx = train_test_split(train_data, train_size=0.8, random_state=2019)
X_train = df_train.iloc[train_idx, :]
X_val = df_train.iloc[val_idx, :]
X_test = df_test
print(X_train.shape)
print(X_val.shape)
print(X_test.shape) | code |
17118187/cell_15 | [
"text_plain_output_1.png"
] | from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from keras.layers import Input, Dense, GlobalAveragePooling2D, Dropout
from keras.models import Sequential, Model
from keras.optimizers import RMSprop, Adam, SGD
from keras.preprocessing.image import ImageDataGenerator
from math import ceil
from sklearn.model_selection import train_test_split
import numpy as np
import os
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
DATA_PATH = '../input/aptos2019-blindness-detection'
TRAIN_IMG_PATH = os.path.join(DATA_PATH, 'train_images')
TEST_IMG_PATH = os.path.join(DATA_PATH, 'test_images')
TRAIN_LABEL_PATH = os.path.join(DATA_PATH, 'train.csv')
TEST_LABEL_PATH = os.path.join(DATA_PATH, 'test.csv')
df_train = pd.read_csv(TRAIN_LABEL_PATH)
df_test = pd.read_csv(TEST_LABEL_PATH)
df_train['diagnosis'] = df_train['diagnosis'].astype('str')
df_train = df_train[['id_code', 'diagnosis']]
if df_train['id_code'][0].split('.')[-1] != 'png':
for index in range(len(df_train['id_code'])):
df_train['id_code'][index] = df_train['id_code'][index] + '.png'
df_test = df_test[['id_code']]
if df_test['id_code'][0].split('.')[-1] != 'png':
for index in range(len(df_test['id_code'])):
df_test['id_code'][index] = df_test['id_code'][index] + '.png'
train_data = np.arange(df_train.shape[0])
train_idx, val_idx = train_test_split(train_data, train_size=0.8, random_state=2019)
X_train = df_train.iloc[train_idx, :]
X_val = df_train.iloc[val_idx, :]
X_test = df_test
num_classes = 5
img_size = (299, 299, 3)
nb_train_samples = len(X_train)
nb_validation_samples = len(X_val)
nb_test_samples = len(X_test)
epochs = 50
batch_size = 32
train_datagen = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True, vertical_flip=True, width_shift_range=0.1, height_shift_range=0.1, brightness_range=[0.5, 1.5])
val_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(dataframe=X_train, directory=TRAIN_IMG_PATH, x_col='id_code', y_col='diagnosis', target_size=img_size[:2], color_mode='rgb', class_mode='categorical', batch_size=batch_size, seed=2019)
validation_generator = val_datagen.flow_from_dataframe(dataframe=X_val, directory=TRAIN_IMG_PATH, x_col='id_code', y_col='diagnosis', target_size=img_size[:2], color_mode='rgb', class_mode='categorical', batch_size=batch_size, shuffle=False)
test_generator = test_datagen.flow_from_dataframe(dataframe=X_test, directory=TEST_IMG_PATH, x_col='id_code', y_col=None, target_size=img_size[:2], color_mode='rgb', class_mode=None, batch_size=batch_size, shuffle=False)
def get_model(file_path, input_shape, num_classes):
input_tensor = Input(shape=input_shape)
base_model = InceptionV3(include_top=False, weights=None, input_tensor=input_tensor)
base_model.load_weights(filepath=file_path)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.25)(x)
output_tensor = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=input_tensor, outputs=output_tensor)
optimizer = Adam(lr=0.0001)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
return model
model_path = '../input/inceptionv3/'
weight_file = 'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
model = get_model(file_path=os.path.join(model_path, weight_file), input_shape=img_size, num_classes=num_classes)
LOG_DIR = './logs'
if not os.path.isdir(LOG_DIR):
os.mkdir(LOG_DIR)
CKPT_PATH = LOG_DIR + '/checkpoint-{epoch:02d}-{val_loss:.4f}.hdf5'
checkPoint = ModelCheckpoint(filepath=CKPT_PATH, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
reduceLROnPlateau = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=1e-06, verbose=1, mode='min')
earlyStopping = EarlyStopping(monitor='val_loss', patience=15, verbose=1, mode='min')
history = model.fit_generator(train_generator, steps_per_epoch=ceil(nb_train_samples / batch_size), epochs=epochs, validation_data=validation_generator, validation_steps=ceil(nb_validation_samples / batch_size), callbacks=[checkPoint, reduceLROnPlateau, earlyStopping], verbose=2)
acc = history.history['acc']
val_acc = history.history['val_acc']
plt.plot(acc)
plt.plot(val_acc)
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show() | code |
17118187/cell_16 | [
"image_output_1.png"
] | from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from keras.layers import Input, Dense, GlobalAveragePooling2D, Dropout
from keras.models import Sequential, Model
from keras.optimizers import RMSprop, Adam, SGD
from keras.preprocessing.image import ImageDataGenerator
from math import ceil
from sklearn.model_selection import train_test_split
import numpy as np
import os
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
DATA_PATH = '../input/aptos2019-blindness-detection'
TRAIN_IMG_PATH = os.path.join(DATA_PATH, 'train_images')
TEST_IMG_PATH = os.path.join(DATA_PATH, 'test_images')
TRAIN_LABEL_PATH = os.path.join(DATA_PATH, 'train.csv')
TEST_LABEL_PATH = os.path.join(DATA_PATH, 'test.csv')
df_train = pd.read_csv(TRAIN_LABEL_PATH)
df_test = pd.read_csv(TEST_LABEL_PATH)
df_train['diagnosis'] = df_train['diagnosis'].astype('str')
df_train = df_train[['id_code', 'diagnosis']]
if df_train['id_code'][0].split('.')[-1] != 'png':
for index in range(len(df_train['id_code'])):
df_train['id_code'][index] = df_train['id_code'][index] + '.png'
df_test = df_test[['id_code']]
if df_test['id_code'][0].split('.')[-1] != 'png':
for index in range(len(df_test['id_code'])):
df_test['id_code'][index] = df_test['id_code'][index] + '.png'
train_data = np.arange(df_train.shape[0])
train_idx, val_idx = train_test_split(train_data, train_size=0.8, random_state=2019)
X_train = df_train.iloc[train_idx, :]
X_val = df_train.iloc[val_idx, :]
X_test = df_test
num_classes = 5
img_size = (299, 299, 3)
nb_train_samples = len(X_train)
nb_validation_samples = len(X_val)
nb_test_samples = len(X_test)
epochs = 50
batch_size = 32
train_datagen = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True, vertical_flip=True, width_shift_range=0.1, height_shift_range=0.1, brightness_range=[0.5, 1.5])
val_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(dataframe=X_train, directory=TRAIN_IMG_PATH, x_col='id_code', y_col='diagnosis', target_size=img_size[:2], color_mode='rgb', class_mode='categorical', batch_size=batch_size, seed=2019)
validation_generator = val_datagen.flow_from_dataframe(dataframe=X_val, directory=TRAIN_IMG_PATH, x_col='id_code', y_col='diagnosis', target_size=img_size[:2], color_mode='rgb', class_mode='categorical', batch_size=batch_size, shuffle=False)
test_generator = test_datagen.flow_from_dataframe(dataframe=X_test, directory=TEST_IMG_PATH, x_col='id_code', y_col=None, target_size=img_size[:2], color_mode='rgb', class_mode=None, batch_size=batch_size, shuffle=False)
def get_model(file_path, input_shape, num_classes):
input_tensor = Input(shape=input_shape)
base_model = InceptionV3(include_top=False, weights=None, input_tensor=input_tensor)
base_model.load_weights(filepath=file_path)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.25)(x)
output_tensor = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=input_tensor, outputs=output_tensor)
optimizer = Adam(lr=0.0001)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
return model
model_path = '../input/inceptionv3/'
weight_file = 'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
model = get_model(file_path=os.path.join(model_path, weight_file), input_shape=img_size, num_classes=num_classes)
LOG_DIR = './logs'
if not os.path.isdir(LOG_DIR):
os.mkdir(LOG_DIR)
CKPT_PATH = LOG_DIR + '/checkpoint-{epoch:02d}-{val_loss:.4f}.hdf5'
checkPoint = ModelCheckpoint(filepath=CKPT_PATH, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
reduceLROnPlateau = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=1e-06, verbose=1, mode='min')
earlyStopping = EarlyStopping(monitor='val_loss', patience=15, verbose=1, mode='min')
history = model.fit_generator(train_generator, steps_per_epoch=ceil(nb_train_samples / batch_size), epochs=epochs, validation_data=validation_generator, validation_steps=ceil(nb_validation_samples / batch_size), callbacks=[checkPoint, reduceLROnPlateau, earlyStopping], verbose=2)
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.plot(loss)
plt.plot(val_loss)
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show() | code |
17118187/cell_5 | [
"image_output_1.png"
] | import os
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
DATA_PATH = '../input/aptos2019-blindness-detection'
TRAIN_IMG_PATH = os.path.join(DATA_PATH, 'train_images')
TEST_IMG_PATH = os.path.join(DATA_PATH, 'test_images')
TRAIN_LABEL_PATH = os.path.join(DATA_PATH, 'train.csv')
TEST_LABEL_PATH = os.path.join(DATA_PATH, 'test.csv')
df_train = pd.read_csv(TRAIN_LABEL_PATH)
df_test = pd.read_csv(TEST_LABEL_PATH)
plt.figure(figsize=(12, 6))
sns.countplot(df_train['diagnosis'])
plt.title('Number of data per each diagnosis')
plt.show() | code |
2007531/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2007531/cell_7 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
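# train_X, train_y, val_X, val_y are prepared in an earlier cell that is not captured in this extract.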
my_model = LogisticRegression()
my_model.fit(train_X, train_y)
my_model.score(val_X, val_y) | code |
2007531/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ginf.csv')
df.head() | code |
73060852/cell_4 | [
"text_plain_output_1.png"
] | from sklearn import neighbors
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import classification_report
from sklearn.metrics import mean_absolute_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test_data = pd.read_csv('../input/testdata/UNSW_NB15_testing-set.csv', index_col=0)
train_data = pd.read_csv('../input/testdata/UNSW_NB15_training-set.csv', index_col=0)
test_data.index = test_data.index + len(train_data)
(len(train_data), len(test_data))
total = pd.concat([train_data, test_data], axis=0)
significant_proto = total.proto.value_counts()[:5]
total.loc[~total['proto'].isin(significant_proto.index), 'proto'] = '-'
total = pd.concat([total, pd.get_dummies(total['proto'], prefix='proto')], axis=1)
total.drop('proto', axis=1, inplace=True)
features = ['dur', 'proto_-', 'proto_arp', 'proto_ospf', 'proto_tcp', 'proto_udp', 'proto_unas', 'spkts', 'dpkts', 'sbytes', 'rate']
values = ['dur', 'spkts', 'dpkts', 'rate', 'sbytes']
total['dur'] = np.log(total['dur'] + 1)
total['spkts'] = np.log(total['spkts'] + 1)
total['dpkts'] = np.log(total['dpkts'] + 1)
total['rate'] = np.log(total['rate'] + 1)
total['sbytes'] = np.log(total['sbytes'] + 1)
train_data = total.loc[train_data.index]
test_data = total.loc[test_data.index]
X = train_data[features]
y = train_data['label']
y.count()
model = RandomForestRegressor()
model.fit(X, y)
predictions = model.predict(test_data[features])
mae = mean_absolute_error(predictions, test_data.label)
X = train_data[features]
y = train_data['label']
y.count()
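# Repeat the experiment with a k-nearest-neighbours classifier on the same features.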
model = neighbors.KNeighborsClassifier()
model.fit(X, y)
predictions = model.predict(test_data[features])
mae = mean_absolute_error(predictions, test_data.label)
print(mae)
print(classification_report(test_data.label, predictions)) | code |
73060852/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import classification_report
from sklearn import neighbors
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73060852/cell_3 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test_data = pd.read_csv('../input/testdata/UNSW_NB15_testing-set.csv', index_col=0)
train_data = pd.read_csv('../input/testdata/UNSW_NB15_training-set.csv', index_col=0)
test_data.index = test_data.index + len(train_data)
(len(train_data), len(test_data))
total = pd.concat([train_data, test_data], axis=0)
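# Keep the five most frequent protocol values and bucket everything else under '-' before one-hot encoding.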
significant_proto = total.proto.value_counts()[:5]
total.loc[~total['proto'].isin(significant_proto.index), 'proto'] = '-'
total = pd.concat([total, pd.get_dummies(total['proto'], prefix='proto')], axis=1)
total.drop('proto', axis=1, inplace=True)
features = ['dur', 'proto_-', 'proto_arp', 'proto_ospf', 'proto_tcp', 'proto_udp', 'proto_unas', 'spkts', 'dpkts', 'sbytes', 'rate']
values = ['dur', 'spkts', 'dpkts', 'rate', 'sbytes']
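# np.log(x + 1) (log1p) compresses the typically heavy right tails of these traffic features.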
total['dur'] = np.log(total['dur'] + 1)
total['spkts'] = np.log(total['spkts'] + 1)
total['dpkts'] = np.log(total['dpkts'] + 1)
total['rate'] = np.log(total['rate'] + 1)
total['sbytes'] = np.log(total['sbytes'] + 1)
train_data = total.loc[train_data.index]
test_data = total.loc[test_data.index]
X = train_data[features]
y = train_data['label']
y.count()
model = RandomForestRegressor()
model.fit(X, y)
predictions = model.predict(test_data[features])
mae = mean_absolute_error(predictions, test_data.label)
print(mae) | code |
129030086/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
df2015 = pd.read_csv('/kaggle/input/world-happiness/2015.csv')
df2016 = pd.read_csv('/kaggle/input/world-happiness/2016.csv')
df2017 = pd.read_csv('/kaggle/input/world-happiness/2017.csv')
df2018 = pd.read_csv('/kaggle/input/world-happiness/2018.csv')
df2019 = pd.read_csv('/kaggle/input/world-happiness/2019.csv') | code |
128027172/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts() | code |
128027172/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape | code |
128027172/cell_30 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
Years = df['Year'].value_counts().sort_values(ascending=False)
Years.head(10) | code |
128027172/cell_33 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
Years = df['Year'].value_counts().sort_values(ascending=False)
Years.to_frame().plot(kind='bar', color='purple', figsize=(15, 8))
plt.title('Year trends') | code |
128027172/cell_44 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
final.columns
Genre = final.Genre.value_counts()
Genre | code |
128027172/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts() | code |
128027172/cell_40 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
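# Total sales per game title in each region, sorted largest first.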
NA_Sales = df.groupby(['Name'])['NA_Sales'].sum().sort_values(ascending=False)
EU_Sales = df.groupby(['Name'])['EU_Sales'].sum().sort_values(ascending=False)
JP_Sales = df.groupby(['Name'])['JP_Sales'].sum().sort_values(ascending=False)
JP_Sales.head(5).plot(kind='bar', color='purple', figsize=(15, 8)) | code |
128027172/cell_39 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
final.columns | code |
128027172/cell_41 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
NA_Sales = df.groupby(['Name'])['NA_Sales'].sum().sort_values(ascending=False)
EU_Sales = df.groupby(['Name'])['EU_Sales'].sum().sort_values(ascending=False)
JP_Sales = df.groupby(['Name'])['JP_Sales'].sum().sort_values(ascending=False)
Global_Sales = df.groupby(['Name'])['Global_Sales'].sum().sort_values(ascending=False)
Global_Sales.head(5).plot(kind='bar', color='purple', figsize=(15, 8)) | code |
128027172/cell_54 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
NA_Sales = df.groupby(['Name'])['NA_Sales'].sum().sort_values(ascending=False)
EU_Sales = df.groupby(['Name'])['EU_Sales'].sum().sort_values(ascending=False)
JP_Sales = df.groupby(['Name'])['JP_Sales'].sum().sort_values(ascending=False)
Global_Sales = df.groupby(['Name'])['Global_Sales'].sum().sort_values(ascending=False)
Genre_Sales = df.groupby(['Genre'])['Global_Sales'].sum().sort_values(ascending=False)
Genre_Sales = df.groupby(['Genre'])['Global_Sales'].sum().sort_values(ascending=False)
Publisher = df.groupby(['Publisher'])['Global_Sales'].sum().sort_values(ascending=False)
Platform = df.groupby(['Platform'])['Global_Sales'].sum().sort_values(ascending=False)
Platform.head(5).plot(kind='bar', color='purple', figsize=(15, 8)) | code |
128027172/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.head() | code |
128027172/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts() | code |
128027172/cell_50 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
NA_Sales = df.groupby(['Name'])['NA_Sales'].sum().sort_values(ascending=False)
EU_Sales = df.groupby(['Name'])['EU_Sales'].sum().sort_values(ascending=False)
JP_Sales = df.groupby(['Name'])['JP_Sales'].sum().sort_values(ascending=False)
Global_Sales = df.groupby(['Name'])['Global_Sales'].sum().sort_values(ascending=False)
Genre_Sales = df.groupby(['Genre'])['Global_Sales'].sum().sort_values(ascending=False)
Genre_Sales = df.groupby(['Genre'])['Global_Sales'].sum().sort_values(ascending=False)
Publisher = df.groupby(['Publisher'])['Global_Sales'].sum().sort_values(ascending=False)
Publisher.head(5) | code |
128027172/cell_52 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
final.columns
Genre = final.Genre.value_counts()
Genre
final.Platform.value_counts().head(10) | code |
128027172/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128027172/cell_45 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
NA_Sales = df.groupby(['Name'])['NA_Sales'].sum().sort_values(ascending=False)
EU_Sales = df.groupby(['Name'])['EU_Sales'].sum().sort_values(ascending=False)
JP_Sales = df.groupby(['Name'])['JP_Sales'].sum().sort_values(ascending=False)
Global_Sales = df.groupby(['Name'])['Global_Sales'].sum().sort_values(ascending=False)
Genre_Sales = df.groupby(['Genre'])['Global_Sales'].sum().sort_values(ascending=False)
Genre_Sales.head() | code |
128027172/cell_49 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
final.columns
Genre = final.Genre.value_counts()
Genre
final.Publisher.value_counts().head(10) | code |
128027172/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
Years = df['Year'].value_counts().sort_values(ascending=False)
Years.tail(10).to_frame().plot(kind='barh', color='purple', figsize=(15, 8))
plt.title('10 least frequent Years') | code |
128027172/cell_51 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
NA_Sales = df.groupby(['Name'])['NA_Sales'].sum().sort_values(ascending=False)
EU_Sales = df.groupby(['Name'])['EU_Sales'].sum().sort_values(ascending=False)
JP_Sales = df.groupby(['Name'])['JP_Sales'].sum().sort_values(ascending=False)
Global_Sales = df.groupby(['Name'])['Global_Sales'].sum().sort_values(ascending=False)
Genre_Sales = df.groupby(['Genre'])['Global_Sales'].sum().sort_values(ascending=False)
Genre_Sales = df.groupby(['Genre'])['Global_Sales'].sum().sort_values(ascending=False)
Publisher = df.groupby(['Publisher'])['Global_Sales'].sum().sort_values(ascending=False)
Publisher.head(5).plot(kind='bar', color='purple', figsize=(15, 8)) | code |
128027172/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.describe() | code |
128027172/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum() | code |
128027172/cell_38 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
NA_Sales = df.groupby(['Name'])['NA_Sales'].sum().sort_values(ascending=False)
EU_Sales = df.groupby(['Name'])['EU_Sales'].sum().sort_values(ascending=False)
EU_Sales.head(5).plot(kind='bar', color='purple', figsize=(15, 8)) | code |
128027172/cell_31 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
Years = df['Year'].value_counts().sort_values(ascending=False)
Years.head(10).to_frame().plot(kind='barh', color='purple', figsize=(15, 8))
plt.title('10 most frequent Years') | code |
128027172/cell_46 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
NA_Sales = df.groupby(['Name'])['NA_Sales'].sum().sort_values(ascending=False)
EU_Sales = df.groupby(['Name'])['EU_Sales'].sum().sort_values(ascending=False)
JP_Sales = df.groupby(['Name'])['JP_Sales'].sum().sort_values(ascending=False)
Global_Sales = df.groupby(['Name'])['Global_Sales'].sum().sort_values(ascending=False)
Genre_Sales = df.groupby(['Genre'])['Global_Sales'].sum().sort_values(ascending=False)
Genre_Sales = df.groupby(['Genre'])['Global_Sales'].sum().sort_values(ascending=False)
Genre_Sales.head(5).plot(kind='bar', color='purple', figsize=(15, 8)) | code |
128027172/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.info() | code |
128027172/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape | code |
128027172/cell_53 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
NA_Sales = df.groupby(['Name'])['NA_Sales'].sum().sort_values(ascending=False)
EU_Sales = df.groupby(['Name'])['EU_Sales'].sum().sort_values(ascending=False)
JP_Sales = df.groupby(['Name'])['JP_Sales'].sum().sort_values(ascending=False)
Global_Sales = df.groupby(['Name'])['Global_Sales'].sum().sort_values(ascending=False)
Genre_Sales = df.groupby(['Genre'])['Global_Sales'].sum().sort_values(ascending=False)
Publisher = df.groupby(['Publisher'])['Global_Sales'].sum().sort_values(ascending=False)
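# total global sales per platform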
Platform = df.groupby(['Platform'])['Global_Sales'].sum().sort_values(ascending=False)
Platform.head() | code |
128027172/cell_37 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
NA_Sales = df.groupby(['Name'])['NA_Sales'].sum().sort_values(ascending=False)
NA_Sales.head(5).plot(kind='bar', color='purple', figsize=(15, 8)) | code |
128027172/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10) | code |
128027172/cell_36 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
df.sample(10)
df.isna().sum()
df.Name.value_counts()
df.Genre.value_counts()
df.Year.value_counts()
df.shape
final = df.dropna()
final.shape
Years = df['Year'].value_counts().sort_values(ascending=False)
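# histograms of the numeric sales columns for the cleaned dataset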
sales = pd.DataFrame(final, columns=['Rank', 'Year', 'NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales'])
sales.hist()
plt.show() | code |
33096468/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
recipes = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_recipes.csv')
interactions = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_interactions.csv')
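# distributions of cooking time, step count, and ingredient count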
recipes[['minutes', 'n_steps', 'n_ingredients']].hist() | code |
33096468/cell_30 | [
"text_html_output_1.png"
] | import pandas as pd
recipes = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_recipes.csv')
interactions = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_interactions.csv')
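# analysis window: ten full years of submissions and interactions (2008-2017)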
from_year, to_year = ('2008-01-01', '2017-12-31')
recipes['submitted'] = pd.to_datetime(recipes['submitted'])
recipes['submitted'] = recipes['submitted'].apply(lambda x: x.tz_localize(None))
recipes_l0y = recipes.loc[recipes['submitted'].between(from_year, to_year, inclusive=False)]
interactions['date'] = pd.to_datetime(interactions['date'])
interactions['date'] = interactions['date'].apply(lambda x: x.tz_localize(None))
interactions_l0y = interactions.loc[interactions['date'].between(from_year, to_year, inclusive=False)]
interactions_l0y = interactions_l0y.copy()
interactions_l0y['year'] = interactions_l0y['date'].dt.year  # year column assumed from an upstream cell not captured in this record
ratings_by_recipe = interactions_l0y.groupby(['recipe_id', 'year']).agg(rating_cnt=('rating', 'count'), rating_avg=('rating', 'mean'))
ratings_by_recipe.head() | code |
33096468/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd
recipes = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_recipes.csv')
interactions = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_interactions.csv')
from_year, to_year = ('2008-01-01', '2017-12-31')
recipes['submitted'] = pd.to_datetime(recipes['submitted'])
recipes['submitted'] = recipes['submitted'].apply(lambda x: x.tz_localize(None))
recipes_l0y = recipes.loc[recipes['submitted'].between(from_year, to_year, inclusive=False)]
interactions['date'] = pd.to_datetime(interactions['date'])
interactions['date'] = interactions['date'].apply(lambda x: x.tz_localize(None))
interactions_l0y = interactions.loc[interactions['date'].between(from_year, to_year, inclusive=False)]
print(recipes_l0y.shape)
print(interactions_l0y.shape) | code |
33096468/cell_29 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
recipes = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_recipes.csv')
interactions = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_interactions.csv')
from_year, to_year = ('2008-01-01', '2017-12-31')
recipes['submitted'] = pd.to_datetime(recipes['submitted'])
recipes['submitted'] = recipes['submitted'].apply(lambda x: x.tz_localize(None))
recipes_l0y = recipes.loc[recipes['submitted'].between(from_year, to_year, inclusive=False)]
interactions['date'] = pd.to_datetime(interactions['date'])
interactions['date'] = interactions['date'].apply(lambda x: x.tz_localize(None))
interactions_l0y = interactions.loc[interactions['date'].between(from_year, to_year, inclusive=False)]
recipes_l0y = recipes_l0y.query('minutes < 1051200').copy()  # .copy() avoids SettingWithCopyWarning when adding columns below
recipes_l0y['year'] = recipes_l0y['submitted'].dt.year
interactions_l0y = interactions_l0y.copy()
interactions_l0y['year'] = interactions_l0y['date'].dt.year | code |
33096468/cell_41 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from matplotlib_venn import venn2
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
veg_meat = ['#454d66', '#b7e778', '#1fab89']
sns.set_palette(veg_meat)
recipes = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_recipes.csv')
interactions = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_interactions.csv')
from_year, to_year = ('2008-01-01', '2017-12-31')
recipes['submitted'] = pd.to_datetime(recipes['submitted'])
recipes['submitted'] = recipes['submitted'].apply(lambda x: x.tz_localize(None))
recipes_l0y = recipes.loc[recipes['submitted'].between(from_year, to_year, inclusive=False)]
interactions['date'] = pd.to_datetime(interactions['date'])
interactions['date'] = interactions['date'].apply(lambda x: x.tz_localize(None))
interactions_l0y = interactions.loc[interactions['date'].between(from_year, to_year, inclusive=False)]
recipes_l0y = recipes_l0y.query('minutes < 1051200').copy()
recipes_l0y['year'] = recipes_l0y['submitted'].dt.year  # year columns assumed from an upstream cell not captured in this record
interactions_l0y = interactions_l0y.copy()
interactions_l0y['year'] = interactions_l0y['date'].dt.year
ratings_by_recipe = interactions_l0y.groupby(['recipe_id', 'year']).agg(rating_cnt=('rating', 'count'), rating_avg=('rating', 'mean'))
recipes_and_ratings = recipes_l0y.merge(ratings_by_recipe, left_on='id', right_on='recipe_id')
recipes_and_ratings['vegetarian'] = ['vegetarian' in tag for tag in recipes_and_ratings['tags']]
recipes_and_ratings['vegan'] = ['vegan' in tag for tag in recipes_and_ratings['tags']]
recipes_and_ratings = recipes_and_ratings.drop(columns=['name', 'tags', 'nutrition', 'steps', 'description', 'ingredients'])
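# venn counts as plotted: all vegetarian recipes, vegan-only recipes, and the overlap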
vegetarian_cnt = len(recipes_and_ratings.query('vegetarian == True'))
vegan_cnt = len(recipes_and_ratings.query('vegan == True'))
intersect_cnt = len(recipes_and_ratings.query('vegetarian == True and vegan == True'))
venn2(subsets=(vegetarian_cnt, vegan_cnt - intersect_cnt, intersect_cnt), set_labels=('Vegetarian', 'Vegan'), set_colors=('#b7e778', '#031c16', '#031c16'), alpha=1)
df = recipes_and_ratings.groupby(['year', 'vegetarian']).agg(recipe_cnt=('id', 'count')).reset_index()
plt.figure(figsize=(12, 6))
ax = sns.lineplot(data=df, x='year', y='recipe_cnt', hue='vegetarian', linewidth=2.5)
ax.set(ylim=(0, None))
ax.set_title('Number of new recipes by year')
ax | code |
33096468/cell_11 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
recipes = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_recipes.csv')
interactions = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_interactions.csv')
print(recipes.info())
recipes.describe() | code |
33096468/cell_32 | [
"text_html_output_1.png"
] | import pandas as pd
recipes = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_recipes.csv')
interactions = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_interactions.csv')
from_year, to_year = ('2008-01-01', '2017-12-31')
recipes['submitted'] = pd.to_datetime(recipes['submitted'])
recipes['submitted'] = recipes['submitted'].apply(lambda x: x.tz_localize(None))
recipes_l0y = recipes.loc[recipes['submitted'].between(from_year, to_year, inclusive=False)]
interactions['date'] = pd.to_datetime(interactions['date'])
interactions['date'] = interactions['date'].apply(lambda x: x.tz_localize(None))
interactions_l0y = interactions.loc[interactions['date'].between(from_year, to_year, inclusive=False)]
recipes_l0y = recipes_l0y.query('minutes < 1051200')
interactions_l0y = interactions_l0y.copy()
interactions_l0y['year'] = interactions_l0y['date'].dt.year  # year column assumed from an upstream cell not captured in this record
ratings_by_recipe = interactions_l0y.groupby(['recipe_id', 'year']).agg(rating_cnt=('rating', 'count'), rating_avg=('rating', 'mean'))
recipes_and_ratings = recipes_l0y.merge(ratings_by_recipe, left_on='id', right_on='recipe_id')
recipes_and_ratings.head(2) | code |
33096468/cell_15 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
recipes = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_recipes.csv')
interactions = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_interactions.csv')
print(interactions.info())
interactions.describe() | code |
33096468/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
recipes = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_recipes.csv')
interactions = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_interactions.csv')
interactions['rating'].hist() | code |
33096468/cell_43 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from matplotlib_venn import venn2
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
veg_meat = ['#454d66', '#b7e778', '#1fab89']
sns.set_palette(veg_meat)
recipes = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_recipes.csv')
interactions = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_interactions.csv')
from_year, to_year = ('2008-01-01', '2017-12-31')
recipes['submitted'] = pd.to_datetime(recipes['submitted'])
recipes['submitted'] = recipes['submitted'].apply(lambda x: x.tz_localize(None))
recipes_l0y = recipes.loc[recipes['submitted'].between(from_year, to_year, inclusive=False)]
interactions['date'] = pd.to_datetime(interactions['date'])
interactions['date'] = interactions['date'].apply(lambda x: x.tz_localize(None))
interactions_l0y = interactions.loc[interactions['date'].between(from_year, to_year, inclusive=False)]
recipes_l0y = recipes_l0y.query('minutes < 1051200').copy()
recipes_l0y['year'] = recipes_l0y['submitted'].dt.year  # year columns assumed from an upstream cell not captured in this record
interactions_l0y = interactions_l0y.copy()
interactions_l0y['year'] = interactions_l0y['date'].dt.year
ratings_by_recipe = interactions_l0y.groupby(['recipe_id', 'year']).agg(rating_cnt=('rating', 'count'), rating_avg=('rating', 'mean'))
recipes_and_ratings = recipes_l0y.merge(ratings_by_recipe, left_on='id', right_on='recipe_id')
recipes_and_ratings['vegetarian'] = ['vegetarian' in tag for tag in recipes_and_ratings['tags']]
recipes_and_ratings['vegan'] = ['vegan' in tag for tag in recipes_and_ratings['tags']]
recipes_and_ratings = recipes_and_ratings.drop(columns=['name', 'tags', 'nutrition', 'steps', 'description', 'ingredients'])
vegetarian_cnt = len(recipes_and_ratings.query('vegetarian == True'))
vegan_cnt = len(recipes_and_ratings.query('vegan == True'))
intersect_cnt = len(recipes_and_ratings.query('vegetarian == True and vegan == True'))
venn2(subsets=(vegetarian_cnt, vegan_cnt - intersect_cnt, intersect_cnt), set_labels=('Vegetarian', 'Vegan'), set_colors=('#b7e778', '#031c16', '#031c16'), alpha=1)
df = recipes_and_ratings.groupby(['year', 'vegetarian']).agg(
recipe_cnt = ('id', 'count')
).reset_index()
plt.figure(figsize=(12,6))
ax = sns.lineplot(data=df, x='year', y='recipe_cnt', hue='vegetarian', linewidth=2.5)
ax.set(ylim=(0, None))
ax.set_title('Number of new recipes by year')
ax
df = recipes_and_ratings.groupby(['year']).agg(total_cnt=('id', 'count'), vegetarian_cnt=('vegetarian', 'sum'), vegan_cnt=('vegan', 'sum')).reset_index()
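# convert the yearly counts into percentages of all new recipes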
df['vegetarian_pct'] = df['vegetarian_cnt'] / df['total_cnt'] * 100
df['vegan_pct'] = df['vegan_cnt'] / df['total_cnt'] * 100
plt.figure(figsize=(12, 6))
ax = sns.lineplot(data=pd.melt(df[['year', 'vegetarian_pct', 'vegan_pct']], ['year']), x='year', y='value', palette=veg_meat[1:], hue='variable', linewidth=2.5)
ax.set(ylim=(0, 100))
ax.set_title('Percent of vegetarian recipes by year')
ax | code |
33096468/cell_24 | [
"text_html_output_1.png"
] | import pandas as pd
recipes = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_recipes.csv')
interactions = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_interactions.csv')
from_year, to_year = ('2008-01-01', '2017-12-31')
recipes['submitted'] = pd.to_datetime(recipes['submitted'])
recipes['submitted'] = recipes['submitted'].apply(lambda x: x.tz_localize(None))
recipes_l0y = recipes.loc[recipes['submitted'].between(from_year, to_year, inclusive=False)]
interactions['date'] = pd.to_datetime(interactions['date'])
interactions['date'] = interactions['date'].apply(lambda x: x.tz_localize(None))
interactions_l0y = interactions.loc[interactions['date'].between(from_year, to_year, inclusive=False)]
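# flag cooking-time outliers using the 1.5 * IQR rule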
Q1 = recipes_l0y['minutes'].quantile(0.25)
Q3 = recipes_l0y['minutes'].quantile(0.75)
IQR = Q3 - Q1
max_value = Q3 + 1.5 * IQR
min_value = Q1 - 1.5 * IQR
minutes_outliers = recipes_l0y[(recipes_l0y['minutes'] > max_value) | (recipes_l0y['minutes'] < min_value)]
minutes_outliers.sort_values('minutes') | code |
33096468/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
recipes = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_recipes.csv')
interactions = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_interactions.csv')
interactions.head() | code |
33096468/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
veg_meat = ['#454d66', '#b7e778', '#1fab89']
sns.set_palette(veg_meat)
recipes = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_recipes.csv')
interactions = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_interactions.csv')
from_year, to_year = ('2008-01-01', '2017-12-31')
recipes['submitted'] = pd.to_datetime(recipes['submitted'])
recipes['submitted'] = recipes['submitted'].apply(lambda x: x.tz_localize(None))
recipes_l0y = recipes.loc[recipes['submitted'].between(from_year, to_year, inclusive=False)]
interactions['date'] = pd.to_datetime(interactions['date'])
interactions['date'] = interactions['date'].apply(lambda x: x.tz_localize(None))
interactions_l0y = interactions.loc[interactions['date'].between(from_year, to_year, inclusive=False)]
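# boxplot of cooking minutes to surface extreme outliers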
sns.boxplot(x=recipes_l0y['minutes']) | code |
33096468/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
recipes = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_recipes.csv')
interactions = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_interactions.csv')
recipes.head() | code |
33096468/cell_37 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from matplotlib_venn import venn2
import pandas as pd
recipes = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_recipes.csv')
interactions = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_interactions.csv')
from_year, to_year = ('2008-01-01', '2017-12-31')
recipes['submitted'] = pd.to_datetime(recipes['submitted'])
recipes['submitted'] = recipes['submitted'].apply(lambda x: x.tz_localize(None))
recipes_l0y = recipes.loc[recipes['submitted'].between(from_year, to_year, inclusive=False)]
interactions['date'] = pd.to_datetime(interactions['date'])
interactions['date'] = interactions['date'].apply(lambda x: x.tz_localize(None))
interactions_l0y = interactions.loc[interactions['date'].between(from_year, to_year, inclusive=False)]
recipes_l0y = recipes_l0y.query('minutes < 1051200')
interactions_l0y = interactions_l0y.copy()
interactions_l0y['year'] = interactions_l0y['date'].dt.year  # year column assumed from an upstream cell not captured in this record
ratings_by_recipe = interactions_l0y.groupby(['recipe_id', 'year']).agg(rating_cnt=('rating', 'count'), rating_avg=('rating', 'mean'))
recipes_and_ratings = recipes_l0y.merge(ratings_by_recipe, left_on='id', right_on='recipe_id')
recipes_and_ratings['vegetarian'] = ['vegetarian' in tag for tag in recipes_and_ratings['tags']]
recipes_and_ratings['vegan'] = ['vegan' in tag for tag in recipes_and_ratings['tags']]
recipes_and_ratings = recipes_and_ratings.drop(columns=['name', 'tags', 'nutrition', 'steps', 'description', 'ingredients'])
vegetarian_cnt = len(recipes_and_ratings.query('vegetarian == True'))
vegan_cnt = len(recipes_and_ratings.query('vegan == True'))
intersect_cnt = len(recipes_and_ratings.query('vegetarian == True and vegan == True'))
venn2(subsets=(vegetarian_cnt, vegan_cnt - intersect_cnt, intersect_cnt), set_labels=('Vegetarian', 'Vegan'), set_colors=('#b7e778', '#031c16', '#031c16'), alpha=1) | code |
33096468/cell_36 | [
"text_html_output_1.png"
] | import pandas as pd
recipes = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_recipes.csv')
interactions = pd.read_csv('/kaggle/input/food-com-recipes-and-user-interactions/RAW_interactions.csv')
from_year, to_year = ('2008-01-01', '2017-12-31')
recipes['submitted'] = pd.to_datetime(recipes['submitted'])
recipes['submitted'] = recipes['submitted'].apply(lambda x: x.tz_localize(None))
recipes_l0y = recipes.loc[recipes['submitted'].between(from_year, to_year, inclusive=False)]
interactions['date'] = pd.to_datetime(interactions['date'])
interactions['date'] = interactions['date'].apply(lambda x: x.tz_localize(None))
interactions_l0y = interactions.loc[interactions['date'].between(from_year, to_year, inclusive=False)]
recipes_l0y = recipes_l0y.query('minutes < 1051200')
interactions_l0y = interactions_l0y.copy()
interactions_l0y['year'] = interactions_l0y['date'].dt.year  # year column assumed from an upstream cell not captured in this record
ratings_by_recipe = interactions_l0y.groupby(['recipe_id', 'year']).agg(rating_cnt=('rating', 'count'), rating_avg=('rating', 'mean'))
recipes_and_ratings = recipes_l0y.merge(ratings_by_recipe, left_on='id', right_on='recipe_id')
recipes_and_ratings['vegetarian'] = ['vegetarian' in tag for tag in recipes_and_ratings['tags']]
recipes_and_ratings['vegan'] = ['vegan' in tag for tag in recipes_and_ratings['tags']]
recipes_and_ratings = recipes_and_ratings.drop(columns=['name', 'tags', 'nutrition', 'steps', 'description', 'ingredients'])
recipes_and_ratings.head(2) | code |
105206399/cell_42 | [
"text_plain_output_1.png"
] | from sklearn import linear_model
from sklearn.preprocessing import RobustScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()
def check(df):
l = []
columns = df.columns
for col in columns:
dtypes = df[col].dtypes
nunique = df[col].nunique()
sum_null = df[col].isnull().sum()
l.append([col, dtypes, nunique, sum_null])
df_check = pd.DataFrame(l)
df_check.columns = ['column', 'dtypes', 'nunique', 'sum_null']
return df_check
check(df_cars)
df_cars.isnull().sum()
df_cars.duplicated().sum()
df_cars.columns
df_cars.drop('car_ID', axis=1, inplace=True)
df_cars.CarName.unique()
df_cars.CarName.unique()
categorical_cols = df_cars.select_dtypes(include=['object']).columns
c = df_cars.corr()
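# modelling features chosen from the correlation analysis above; note drivewheel and fuelsystem
# are categorical and would need encoding before scaling (presumably handled in an uncaptured cell)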
cars = df_cars[['wheelbase', 'carlength', 'carwidth', 'curbweight', 'enginesize', 'boreratio', 'horsepower', 'citympg', 'highwaympg', 'drivewheel', 'fuelsystem', 'price']]
x = cars.drop('price', axis=1).values
y = cars['price'].values
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)  # split parameters assumed; the original split cell is not captured in this record
from sklearn.preprocessing import RobustScaler
ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.transform(x_test)  # transform only: the scaler is fit on the training data alone
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
reg.score(x_train, y_train)
reg.score(x_test, y_test)
reg.intercept_
reg.coef_
pd.DataFrame(reg.coef_, cars.columns[:-1], columns=['coefficients'])
y_pred_1 = reg.predict(x_test)
df_1 = pd.DataFrame({'y_test': y_test, 'Y_pred': y_pred_1})
df_1.head(10) | code |
105206399/cell_9 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()
df_cars.describe() | code |
105206399/cell_25 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()
df_cars.isnull().sum()
df_cars.duplicated().sum()
df_cars.columns
df_cars.drop('car_ID', axis=1, inplace=True)
df_cars.CarName.unique()
df_cars.CarName.unique()
categorical_cols = df_cars.select_dtypes(include=['object']).columns
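# correlation heatmap across all numeric columns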
plt.figure(figsize=(30, 20))
c = df_cars.corr()
sns.heatmap(c, annot=True) | code |
105206399/cell_57 | [
"text_html_output_1.png"
] | from sklearn import linear_model
from sklearn import linear_model
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import RobustScaler
# x_train/x_test/y_train/y_test are assumed to come from a train_test_split cell not captured in this record
ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.transform(x_test)  # transform only: the scaler is fit on the training data alone
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
from sklearn import linear_model
rid = linear_model.Ridge(alpha=0.9)
rid.fit(x_train, y_train)
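# Lasso adds an L1 penalty that can shrink some coefficients exactly to zero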
from sklearn import linear_model
lass = linear_model.Lasso(alpha=0.6)
lass.fit(x_train, y_train)
lass.score(x_train, y_train) | code |
105206399/cell_56 | [
"text_html_output_1.png"
] | from sklearn import linear_model
from sklearn import linear_model
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import RobustScaler
# x_train/x_test/y_train/y_test are assumed to come from a train_test_split cell not captured in this record
ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.transform(x_test)  # transform only: the scaler is fit on the training data alone
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
from sklearn import linear_model
rid = linear_model.Ridge(alpha=0.9)
rid.fit(x_train, y_train)
from sklearn import linear_model
lass = linear_model.Lasso(alpha=0.6)
lass.fit(x_train, y_train) | code |
105206399/cell_34 | [
"text_plain_output_1.png"
] | from sklearn import linear_model
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import RobustScaler
# x_train/x_test/y_train/y_test are assumed to come from a train_test_split cell not captured in this record
ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.transform(x_test)  # transform only: the scaler is fit on the training data alone
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(x_train, y_train) | code |
105206399/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()
df_cars.isnull().sum()
df_cars.duplicated().sum()
df_cars.columns
df_cars.drop('car_ID', axis=1, inplace=True)
df_cars.CarName.unique()
df_cars.CarName.unique() | code |
105206399/cell_39 | [
"text_plain_output_1.png"
] | from sklearn import linear_model
from sklearn.preprocessing import RobustScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()
def check(df):
l = []
columns = df.columns
for col in columns:
dtypes = df[col].dtypes
nunique = df[col].nunique()
sum_null = df[col].isnull().sum()
l.append([col, dtypes, nunique, sum_null])
df_check = pd.DataFrame(l)
df_check.columns = ['column', 'dtypes', 'nunique', 'sum_null']
return df_check
check(df_cars)
df_cars.isnull().sum()
df_cars.duplicated().sum()
df_cars.columns
df_cars.drop('car_ID', axis=1, inplace=True)
df_cars.CarName.unique()
df_cars.CarName.unique()
categorical_cols = df_cars.select_dtypes(include=['object']).columns
c = df_cars.corr()
cars = df_cars[['wheelbase', 'carlength', 'carwidth', 'curbweight', 'enginesize', 'boreratio', 'horsepower', 'citympg', 'highwaympg', 'drivewheel', 'fuelsystem', 'price']]
x = cars.drop('price', axis=1).values
y = cars['price'].values
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)  # split parameters assumed; the original split cell is not captured in this record
from sklearn.preprocessing import RobustScaler
ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.transform(x_test)  # transform only: the scaler is fit on the training data alone
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
reg.score(x_train, y_train)
reg.score(x_test, y_test)
reg.intercept_
reg.coef_
pd.DataFrame(reg.coef_, cars.columns[:-1], columns=['coefficients']) | code |
105206399/cell_48 | [
"text_plain_output_1.png"
] | from sklearn import linear_model
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import RobustScaler
# x_train/x_test/y_train/y_test are assumed to come from a train_test_split cell not captured in this record
ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.transform(x_test)  # transform only: the scaler is fit on the training data alone
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
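# Ridge regression: least squares with an L2 penalty controlled by alpha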
from sklearn import linear_model
rid = linear_model.Ridge(alpha=0.9)
rid.fit(x_train, y_train)
rid.score(x_train, y_train)
rid.score(x_test, y_test)
rid.intercept_ | code |
105206399/cell_61 | [
"text_plain_output_1.png"
] | from sklearn import linear_model
from sklearn import linear_model
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()
def check(df):
l = []
columns = df.columns
for col in columns:
dtypes = df[col].dtypes
nunique = df[col].nunique()
sum_null = df[col].isnull().sum()
l.append([col, dtypes, nunique, sum_null])
df_check = pd.DataFrame(l)
df_check.columns = ['column', 'dtypes', 'nunique', 'sum_null']
return df_check
check(df_cars)
df_cars.isnull().sum()
df_cars.duplicated().sum()
df_cars.columns
df_cars.drop('car_ID', axis=1, inplace=True)
df_cars.CarName.unique()
df_cars.CarName.unique()
categorical_cols = df_cars.select_dtypes(include=['object']).columns
c = df_cars.corr()
cars = df_cars[['wheelbase', 'carlength', 'carwidth', 'curbweight', 'enginesize', 'boreratio', 'horsepower', 'citympg', 'highwaympg', 'drivewheel', 'fuelsystem', 'price']]
x = cars.drop('price', axis=1).values
y = cars['price'].values
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)  # split parameters assumed; the original split cell is not captured in this record
from sklearn.preprocessing import RobustScaler
ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.transform(x_test)  # transform only: the scaler is fit on the training data alone
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
reg.score(x_train, y_train)
reg.score(x_test, y_test)
reg.intercept_
reg.coef_
pd.DataFrame(reg.coef_, cars.columns[:-1], columns=['coefficients'])
y_pred_1 = reg.predict(x_test)
df_1 = pd.DataFrame({'y_test': y_test, 'Y_pred': y_pred_1})
from sklearn import linear_model
rid = linear_model.Ridge(alpha=0.9)
rid.fit(x_train, y_train)
rid.score(x_train, y_train)
rid.score(x_test, y_test)
rid.intercept_
rid.coef_
pd.DataFrame(rid.coef_, cars.columns[:-1], columns=['coefficients'])
y_pred_2 = rid.predict(x_test)
df_2 = pd.DataFrame({'y_test': y_test, 'Y_pred': y_pred_2})
from sklearn import linear_model
lass = linear_model.Lasso(alpha=0.6)
lass.fit(x_train, y_train)
lass.score(x_train, y_train)
lass.score(x_test, y_test)
lass.intercept_
lass.coef_
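# coefficient table: Lasso can zero out weak predictors entirely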
pd.DataFrame(lass.coef_, cars.columns[:-1], columns=['coefficients']) | code |
105206399/cell_54 | [
"text_plain_output_1.png"
] | from sklearn import linear_model
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()
def check(df):
l = []
columns = df.columns
for col in columns:
dtypes = df[col].dtypes
nunique = df[col].nunique()
sum_null = df[col].isnull().sum()
l.append([col, dtypes, nunique, sum_null])
df_check = pd.DataFrame(l)
df_check.columns = ['column', 'dtypes', 'nunique', 'sum_null']
return df_check
check(df_cars)
df_cars.isnull().sum()
df_cars.duplicated().sum()
df_cars.columns
df_cars.drop('car_ID', axis=1, inplace=True)
df_cars.CarName.unique()
df_cars.CarName.unique()
categorical_cols = df_cars.select_dtypes(include=['object']).columns
c = df_cars.corr()
cars = df_cars[['wheelbase', 'carlength', 'carwidth', 'curbweight', 'enginesize', 'boreratio', 'horsepower', 'citympg', 'highwaympg', 'drivewheel', 'fuelsystem', 'price']]
x = cars.drop('price', axis=1).values
y = cars['price'].values
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)  # split parameters assumed; the original split cell is not captured in this record
from sklearn.preprocessing import RobustScaler
ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.transform(x_test)  # transform only: the scaler is fit on the training data alone
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
reg.score(x_train, y_train)
reg.score(x_test, y_test)
reg.intercept_
reg.coef_
pd.DataFrame(reg.coef_, cars.columns[:-1], columns=['coefficients'])
y_pred_1 = reg.predict(x_test)
df_1 = pd.DataFrame({'y_test': y_test, 'Y_pred': y_pred_1})
from sklearn import linear_model
rid = linear_model.Ridge(alpha=0.9)
rid.fit(x_train, y_train)
rid.score(x_train, y_train)
rid.score(x_test, y_test)
rid.intercept_
rid.coef_
pd.DataFrame(rid.coef_, cars.columns[:-1], columns=['coefficients'])
y_pred_2 = rid.predict(x_test)
df_2 = pd.DataFrame({'y_test': y_test, 'Y_pred': y_pred_2})
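# plot the first 50 test samples: actual vs Ridge-predicted prices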
plt.figure(figsize=(10, 8))
plt.plot(df_2[:50])
plt.legend(['Actual', 'Predicted']) | code |
105206399/cell_60 | [
"text_plain_output_1.png"
] | from sklearn import linear_model
from sklearn import linear_model
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import RobustScaler
# x_train/x_test/y_train/y_test are assumed to come from a train_test_split cell not captured in this record
ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.transform(x_test)  # transform only: the scaler is fit on the training data alone
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
from sklearn import linear_model
rid = linear_model.Ridge(alpha=0.9)
rid.fit(x_train, y_train)
from sklearn import linear_model
lass = linear_model.Lasso(alpha=0.6)
lass.fit(x_train, y_train)
lass.score(x_train, y_train)
lass.score(x_test, y_test)
lass.intercept_
lass.coef_ | code |
105206399/cell_50 | [
"text_plain_output_1.png"
] | from sklearn import linear_model
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()
def check(df):
l = []
columns = df.columns
for col in columns:
dtypes = df[col].dtypes
nunique = df[col].nunique()
sum_null = df[col].isnull().sum()
l.append([col, dtypes, nunique, sum_null])
df_check = pd.DataFrame(l)
df_check.columns = ['column', 'dtypes', 'nunique', 'sum_null']
return df_check
check(df_cars)
df_cars.isnull().sum()
df_cars.duplicated().sum()
df_cars.columns
df_cars.drop('car_ID', axis=1, inplace=True)
df_cars.CarName.unique()
df_cars.CarName.unique()
categorical_cols = df_cars.select_dtypes(include=['object']).columns
c = df_cars.corr()
cars = df_cars[['wheelbase', 'carlength', 'carwidth', 'curbweight', 'enginesize', 'boreratio', 'horsepower', 'citympg', 'highwaympg', 'drivewheel', 'fuelsystem', 'price']]
x = cars.drop('price', axis=1).values
y = cars['price'].values
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)  # split parameters assumed; the original split cell is not captured in this record
from sklearn.preprocessing import RobustScaler
ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.transform(x_test)  # transform only: the scaler is fit on the training data alone
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
reg.score(x_train, y_train)
reg.score(x_test, y_test)
reg.intercept_
reg.coef_
pd.DataFrame(reg.coef_, cars.columns[:-1], columns=['coefficients'])
y_pred_1 = reg.predict(x_test)
df_1 = pd.DataFrame({'y_test': y_test, 'Y_pred': y_pred_1})
from sklearn import linear_model
rid = linear_model.Ridge(alpha=0.9)
rid.fit(x_train, y_train)
rid.score(x_train, y_train)
rid.score(x_test, y_test)
rid.intercept_
rid.coef_
pd.DataFrame(rid.coef_, cars.columns[:-1], columns=['coefficients']) | code |
105206399/cell_64 | [
"text_plain_output_1.png"
] | from sklearn import linear_model
from sklearn import linear_model
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()
def check(df):
l = []
columns = df.columns
for col in columns:
dtypes = df[col].dtypes
nunique = df[col].nunique()
sum_null = df[col].isnull().sum()
l.append([col, dtypes, nunique, sum_null])
df_check = pd.DataFrame(l)
df_check.columns = ['column', 'dtypes', 'nunique', 'sum_null']
return df_check
check(df_cars)
df_cars.isnull().sum()
df_cars.duplicated().sum()
df_cars.columns
df_cars.drop('car_ID', axis=1, inplace=True)
df_cars.CarName.unique()
df_cars.CarName.unique()
categorical_cols = df_cars.select_dtypes(include=['object']).columns
c = df_cars.corr()
cars = df_cars[['wheelbase', 'carlength', 'carwidth', 'curbweight', 'enginesize', 'boreratio', 'horsepower', 'citympg', 'highwaympg', 'drivewheel', 'fuelsystem', 'price']]
x = cars.drop('price', axis=1).values
y = cars['price'].values
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)  # split parameters assumed; the original split cell is not captured in this record
from sklearn.preprocessing import RobustScaler
ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.transform(x_test)  # transform only: the scaler is fit on the training data alone
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
reg.score(x_train, y_train)
reg.score(x_test, y_test)
reg.intercept_
reg.coef_
pd.DataFrame(reg.coef_, cars.columns[:-1], columns=['coefficients'])
y_pred_1 = reg.predict(x_test)
df_1 = pd.DataFrame({'y_test': y_test, 'Y_pred': y_pred_1})
from sklearn import linear_model
rid = linear_model.Ridge(alpha=0.9)
rid.fit(x_train, y_train)
rid.score(x_train, y_train)
rid.score(x_test, y_test)
rid.intercept_
rid.coef_
pd.DataFrame(rid.coef_, cars.columns[:-1], columns=['coefficients'])
y_pred_2 = rid.predict(x_test)
df_2 = pd.DataFrame({'y_test': y_test, 'Y_pred': y_pred_2})
from sklearn import linear_model
lass = linear_model.Lasso(alpha=0.6)
lass.fit(x_train, y_train)
lass.score(x_train, y_train)
lass.score(x_test, y_test)
lass.intercept_
lass.coef_
pd.DataFrame(lass.coef_, cars.columns[:-1], columns=['coefficients'])
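# predict on the held-out test set and compare actual vs predicted prices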
y_pred_3 = lass.predict(x_test)
df_3 = pd.DataFrame({'y_test': y_test, 'Y_pred': y_pred_3})
df_3.head(10) | code |