path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1, value "code")
---|---|---|---|
74070897/cell_11 | [
"text_plain_output_1.png"
] | from configparser import ConfigParser
from dateutil import parser
from pyproj import Geod
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
try:
    import cPickle as pickle  # Python 2 fallback
except ImportError:
    import pickle
# from utils import g, baseline_time
global baseline_time
baseline_time = parser.parse('01/01/2013 0:0:0')
g = Geod(ellps='WGS84')
"""
Difference between datasets.py
Change the state variables from [lat (deg), lon (deg), alt (FL), spd (nmi/sec), course (rad)]
to: [lat (deg), lon (deg), alt (FL), lat_dot (deg/sec), lon_dot (deg/sec)]
"""
class DatasetEncoderDecoder:
def __init__(self,
actual_track_datapath,
flight_plan_datapath,
flight_plan_utilize_datapath,
feature_cubes_datapath,
shuffle_or_not = True,
split = True,
batch_size = 128,
**kwargs):
print("State variables as in [Lat, lon, alt, cumDT, lat_spd, lon_spd]")
self.actual_track_datapath = actual_track_datapath
self.flight_plan_datapath = flight_plan_datapath
self.flight_plan_utilize_datapath = flight_plan_utilize_datapath
self.feature_cubes_datapath = feature_cubes_datapath
self.shuffle_or_not = shuffle_or_not
self.split = split
self.batch_size = batch_size
self.dep_lat = kwargs.get('dep_lat', 29.98333333)
self.dep_lon = kwargs.get('dep_lon', -95.33333333)
self.arr_lat = kwargs.get('arr_lat', 42.3666666667)
self.arr_lon = kwargs.get('arr_lon', -70.9666666667)
self.time_dim = kwargs.get('time_dim', False)
self.direct_course = kwargs.get('direct_course', g.inv(self.dep_lon, self.dep_lat, self.arr_lon, self.arr_lat)[0]* np.pi/180)
self.idx = kwargs.get('idx', 0)
self.all_tracks, \
self.all_targets, \
self.all_targets_end, \
self.all_targets_end_neg, \
self.all_seq_lens, \
self.data_mean, \
self.data_std, \
self.all_FP_tracks, \
self.all_seq_lens_FP, \
self.FP_mean, \
self.FP_std, \
self.tracks_time_id_info = self.load_track_data()
self.feature_cubes, self.feature_cubes_mean, self.feature_cubes_std = self.load_feature_cubes()
self.feature_cubes = np.split(self.feature_cubes, np.cumsum(self.all_seq_lens))[:-1]
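        # np.split + np.cumsum partitions the flat per-point array back into
        # per-flight sequences; cumsum's last boundary equals the total length,
        # so the trailing empty chunk is dropped with [:-1].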
if self.shuffle_or_not:
self.all_tracks, \
self.all_targets,\
self.all_targets_end,\
self.all_targets_end_neg,\
self.all_seq_lens, \
self.all_FP_tracks, \
self.all_seq_lens_FP, \
self.feature_cubes,\
self.tracks_time_id_info = shuffle(self.all_tracks,
self.all_targets,
self.all_targets_end,
self.all_targets_end_neg,
self.all_seq_lens,
self.all_FP_tracks,
self.all_seq_lens_FP,
self.feature_cubes,
self.tracks_time_id_info,
random_state = 101)
if self.split:
self.train_tracks, \
self.dev_tracks, \
self.train_targets, \
self.dev_targets, \
self.train_targets_end, \
self.dev_targets_end, \
self.train_targets_end_neg, \
self.dev_targets_end_neg, \
self.train_seq_lens, \
self.dev_seq_lens, \
self.train_FP_tracks, \
self.dev_FP_tracks, \
self.train_seq_lens_FP, \
self.dev_seq_lens_FP, \
self.train_feature_cubes, \
self.dev_feature_cubes, \
self.train_tracks_time_id_info, \
self.dev_tracks_time_id_info = train_test_split(self.all_tracks,
self.all_targets,
self.all_targets_end,
self.all_targets_end_neg,
self.all_seq_lens,
self.all_FP_tracks,
self.all_seq_lens_FP,
self.feature_cubes,
self.tracks_time_id_info,
random_state = 101,
train_size = 0.8,
test_size = None)
self.train_tracks = _pad(self.train_tracks, self.train_seq_lens)
self.train_targets = _pad(self.train_targets, self.train_seq_lens)
self.train_targets_end = _pad(self.train_targets_end, self.train_seq_lens)
self.train_targets_end_neg = _pad(self.train_targets_end_neg, self.train_seq_lens)
self.train_feature_cubes = _pad(self.train_feature_cubes, self.train_seq_lens)
self.n_train_data_set = self.train_tracks.shape[0]
def __str__(self):
return 'Dataset Class to Conduct Training Procedure'
def _calc_latlon_spd(self, track_dataframe):
CenterTraj = track_dataframe[['FID', 'Lat', 'Lon', 'DT']]
# CenterTraj.loc[:, 'azimuth'] = last_pnt
tmp_df = CenterTraj.shift(-1)
latlon_spd = np.divide((tmp_df[['Lat', 'Lon']].values - CenterTraj[['Lat', 'Lon']].values), tmp_df.DT.values.reshape(-1,1))
tmp_tail_idx = CenterTraj.groupby("FID")['Lat'].tail(1).index
latlon_spd[tmp_tail_idx, :] = 0.
latlon_spd[np.isnan(latlon_spd)] = 0.
latlon_spd[np.isinf(latlon_spd)] = 0.
return latlon_spd
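    # Boundary handling above: the last row of each FID has no successor under
    # shift(-1), so its index lands in tmp_tail_idx and its speed is forced to
    # 0; any remaining NaN/inf artifacts from zero DT steps are also zeroed.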
def load_track_data(self):
track_data = pd.read_csv(self.actual_track_datapath, header = 0, index_col = 0)
# FID, Elap_Time, Lat, Lon, Alt, DT, Speed (nmi/sec), Elap_Time_Diff (sec), course (rad)
# calculate lat long speed
latlon_spd = self._calc_latlon_spd(track_data)
track_data.loc[:, 'Lat_spd'] = latlon_spd[:, 0]
track_data.loc[:, 'Lon_spd'] = latlon_spd[:, 1]
# merge with flight plans
FP_track = pd.read_csv(self.flight_plan_datapath)
FP_utlize = pd.read_csv(self.flight_plan_utilize_datapath, header = 0)
# subtract departure airport's [lat, lon] from flight plan (FP) track and standardize
FP_track[['LATITUDE', 'LONGITUDE']] -= np.array([self.dep_lat, self.dep_lon])
avg_FP = FP_track[['LATITUDE', 'LONGITUDE']].mean().values
std_err_FP = FP_track[['LATITUDE', 'LONGITUDE']].std().values
FP_track[['LATITUDE', 'LONGITUDE']] = (FP_track[['LATITUDE', 'LONGITUDE']] - avg_FP)/std_err_FP
# merge track data with FP utilize data
track_data_with_FP_id = track_data.merge(FP_utlize, left_on = 'FID', right_on = 'FID', how = 'inner')
# process FP tracks
# Long format to wide format
FP_track_wide = FP_track.groupby('FLT_PLAN_ID').apply(lambda x: x[['LATITUDE', 'LONGITUDE']].values.reshape(1, -1)).reset_index()
FP_track_wide.columns = ['FLT_PLAN_ID', 'FP_tracks']
FP_track_wide['seq_len'] = FP_track_wide.FP_tracks.apply(lambda x: x.shape[1]//2)
# merge track data with wide form of FP tracks
track_data_with_FP = track_data_with_FP_id.merge(FP_track_wide, left_on='FLT_PLAN_ID', right_on = 'FLT_PLAN_ID')
seq_length_tracks = track_data_with_FP.groupby('FID').FLT_PLAN_ID.count().values.astype(np.int32)
track_data_with_FP['cumDT'] = track_data_with_FP.groupby('FID').DT.transform(pd.Series.cumsum)
tracks = track_data_with_FP[['Lat', 'Lon', 'Alt', 'cumDT', 'Lat_spd', 'Lon_spd']].values.astype(np.float32)
print('use cumDT')
# print('Use absolute time elapsed from: ', baseline_time)
# use delta lat and delta lon
tracks = tracks - np.array([self.dep_lat, self.dep_lon, 0., 0., 0., 0.])
avg = tracks.mean(axis = 0)
std_err = tracks.std(axis = 0)
tracks = (tracks - avg)/std_err
tracks_split = np.split(tracks, np.cumsum(seq_length_tracks))[:-1]
# add the arrival information to construct the target sequence
targets_split, targets_end_split, targets_end_split_neg = self._construct_target(tracks_split, avg, std_err, self.time_dim)
FP_track_order = track_data_with_FP.groupby('FID')[['FID', 'Elap_Time', 'FP_tracks', 'seq_len']].head(1)
seq_length_FP = FP_track_order.seq_len.values.astype(np.int32)
FP_tracks_split = FP_track_order.FP_tracks.values
FP_track_order['Elap_Time'] = pd.to_datetime(FP_track_order['Elap_Time'], errors = 'coerce')
tracks_time_id_info = FP_track_order[['FID', 'Elap_Time']].values
FP_tracks_split = _pad_and_flip_FP(FP_tracks_split, seq_length_FP)
# all standardized
return tracks_split, targets_split, targets_end_split, targets_end_split_neg, seq_length_tracks, avg, std_err, FP_tracks_split, seq_length_FP, avg_FP, std_err_FP, tracks_time_id_info
def _construct_target(self, splitted_tracks, avg, std_err, time_dim = False):
tmp_list = []
tmp_end_list = []
tmp_end_list_neg = []
for target_seq in splitted_tracks:
# print(target_seq.shape)
# print(avg.shape)
# print(std_err.shape)
if time_dim:
tmp_list.append(np.concatenate((target_seq[1:, :], (np.array([[self.arr_lat - self.dep_lat,
self.arr_lon - self.dep_lon,
0,
0,
0,
0]]) - avg)/std_err), axis = 0))
else:
tmp_list.append(np.concatenate((target_seq[1:, [0,1,2,4,5]], (np.array([[self.arr_lat - self.dep_lat,
self.arr_lon - self.dep_lon,
0,
0,
0]]) - avg[[0,1,2,4,5]])/std_err[[0,1,2,4,5]]), axis = 0))
tmp_arr = np.zeros((target_seq.shape[0], 1))
tmp_arr[-1, 0] = 1.
tmp_end_list.append(tmp_arr)
tmp_end_list_neg.append(1 - tmp_arr)
return tmp_list, tmp_end_list, tmp_end_list_neg
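    # Illustrative example (assumed 3-step track): the target is the input
    # shifted one step with the standardized arrival point appended, and the
    # end flags mark only the final step:
    #   tmp_arr     -> [[0.], [0.], [1.]]   (targets_end)
    #   1 - tmp_arr -> [[1.], [1.], [0.]]   (targets_end_neg)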
def load_feature_cubes(self):
feature_cubes_pointer = np.load(self.feature_cubes_datapath)
# feature_grid = feature_cubes_pointer['feature_grid']
# query_idx = feature_cubes_pointer['query_idx']
feature_cubes = feature_cubes_pointer['feature_cubes']
# feature_cubes_grid = feature_cubes_pointer['feature_grid'] - np.array([self.dep_lon, self.dep_lat])
# feature_cubes_grid = feature_cubes_grid.reshape(-1, 20, 20, 2)
# feature_cubes = np.concatenate((feature_cubes, feature_cubes_grid), axis = -1)
# feature_cubes have shape of [N_points, 20, 20, 4]
# Standardize the features
feature_cubes_mean = np.mean(feature_cubes, axis = 0)
feature_cubes_std = np.std(feature_cubes, axis = 0)
# Do NOT standardize the binary layer!
feature_cubes_mean[:, :, 0] = 0.
feature_cubes_std[:, :, 0] = 1.
feature_cubes_norm = (feature_cubes - feature_cubes_mean)/feature_cubes_std # shape of [N_point, 20, 20, n_channels]
return feature_cubes_norm, feature_cubes_mean, feature_cubes_std
def next_batch(self):
# n_sample = self.n_train_data_set
train_dev_test = 'train'
idx_list = np.arange(self.n_train_data_set)
if self.idx >= self.n_train_data_set:
self.idx = 0
if self.shuffle_or_not:
idx_list = shuffle(idx_list)
if train_dev_test == 'train':
endidx = min(self.idx + self.batch_size, self.n_train_data_set)
batch_seq_lens = self.train_seq_lens[idx_list[self.idx:endidx]]
batch_inputs = self.train_tracks[idx_list[self.idx:endidx], :, :]
batch_targets = self.train_targets[idx_list[self.idx:endidx], :, :]
batch_targets_end = self.train_targets_end[idx_list[self.idx:endidx], :, :]
batch_targets_end_neg = self.train_targets_end_neg[idx_list[self.idx:endidx], :, :]
batch_seq_lens_FP = self.train_seq_lens_FP[idx_list[self.idx:endidx]]
batch_inputs_FP = self.train_FP_tracks[idx_list[self.idx:endidx], :, :]
batch_inputs_feature_cubes = self.train_feature_cubes[idx_list[self.idx:endidx], :, :, :, :]  # index through idx_list so the feature cubes stay aligned with the (possibly shuffled) tracks
self.idx += self.batch_size
return batch_inputs, batch_targets, batch_targets_end, batch_targets_end_neg, batch_seq_lens, batch_inputs_FP, batch_seq_lens_FP, batch_inputs_feature_cubes
#######################################################################################################
from utils_features import match_wind_fname, match_ncwf_fname, flight_track_feature_generator, proxilvl
class DatasetSample(flight_track_feature_generator):
def __init__(self,
train_track_mean,
train_track_std,
train_fp_mean,
train_fp_std,
feature_cubes_mean,
feature_cubes_std,
ncwf_data_rootdir = '../../DATA/NCWF/gridded_storm_hourly/',
test_track_dir = '../../DATA/DeepTP/test_flight_tracks.csv',
test_fp_dir = '../../DATA/DeepTP/test_flight_plans.csv',
flight_plan_util_dir = '../../DATA/DeepTP/test_flight_plans_util.CSV',
wind_data_rootdir = '../../DATA/filtered_weather_data/namanl_small_npz/',
grbs_common_info_dir = '/media/storage/DATA/filtered_weather_data/grbs_common_info.npz',
grbs_lvl_dict_dir = '/media/storage/DATA/filtered_weather_data/grbs_level_common_info.pkl',
grbs_smallgrid_kdtree_dir = '/media/storage/DATA/filtered_weather_data/grbs_smallgrid_kdtree.pkl',
ncwf_arr_dir = '../../DATA/NCWF/gridded_storm.npz',
ncwf_alt_dict_dir = '../../DATA/NCWF/alt_dict.pkl',
large_load = False,
weather_feature = True,
**kwargs):
self.train_track_mean = train_track_mean
self.train_track_std = train_track_std
self.train_fp_mean = train_fp_mean
self.train_fp_std = train_fp_std
self.train_feature_cubes_mean = feature_cubes_mean
self.train_feature_cubes_std = feature_cubes_std
self.ncwf_data_rootdir = ncwf_data_rootdir
self.large_load = large_load
self.weather_feature = weather_feature
self.dep_lat = kwargs.get('dep_lat', 29.98333333)
self.dep_lon = kwargs.get('dep_lon', -95.33333333)
self.arr_lat = kwargs.get('arr_lat', 42.3666666667)
self.arr_lon = kwargs.get('arr_lon', -70.9666666667)
self.direct_course = kwargs.get('direct_course', g.inv(self.dep_lon, self.dep_lat, self.arr_lon, self.arr_lat)[0] * np.pi/180)
super().__init__(flight_track_dir = test_track_dir,
flight_plan_dir = test_fp_dir,
flight_plan_util_dir = flight_plan_util_dir,
wind_data_rootdir = wind_data_rootdir,
grbs_common_info_dir = grbs_common_info_dir,
grbs_lvl_dict_dir = grbs_lvl_dict_dir,
grbs_smallgrid_kdtree_dir = grbs_smallgrid_kdtree_dir,
ncwf_arr_dir = ncwf_arr_dir,
ncwf_alt_dict_dir = ncwf_alt_dict_dir,
load_ncwf_arr = False,
downsample = False)
def __str__(self):
return 'Dataset Class to Conduct Sampling Procedure'
def _calc_latlon_spd(self, track_dataframe):
CenterTraj = track_dataframe[['FID', 'Lat', 'Lon', 'DT']]
# CenterTraj.loc[:, 'azimuth'] = last_pnt
tmp_df = CenterTraj.shift(-1)
latlon_spd = np.divide((tmp_df[['Lat', 'Lon']].values - CenterTraj[['Lat', 'Lon']].values), tmp_df.DT.values.reshape(-1,1))
tmp_tail_idx = CenterTraj.groupby("FID")['Lat'].tail(1).index
latlon_spd[tmp_tail_idx, :] = 0.
latlon_spd[np.isnan(latlon_spd)] = 0.
latlon_spd[np.isinf(latlon_spd)] = 0.
return latlon_spd
def _count_unordered_seq_length(self, count_array):
fp_seq_length = []
_tmp_ = []
j = -1
for i in count_array:
if i not in _tmp_:
_tmp_.append(i)
fp_seq_length.append(1)
j += 1
else:
fp_seq_length[j] += 1
return np.array(fp_seq_length).astype(np.int32)
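    # Example (hypothetical FIDs): [7, 7, 7, 42, 42, 9] -> [3, 2, 1].
    # Note this assumes each FID occupies one contiguous block; an FID that
    # reappears after a different FID would be counted into the wrong bucket.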
def process_test_tracks(self):
flight_tracks = self.flight_track_preprocess(self.ori_flight_tracks)
flight_tracks['cumDT'] = flight_tracks.groupby('FID').DT.transform(pd.Series.cumsum)
# calculate lat long speed
latlon_spd = self._calc_latlon_spd(flight_tracks)
flight_tracks.loc[:, 'Lat_spd'] = latlon_spd[:, 0]
flight_tracks.loc[:, 'Lon_spd'] = latlon_spd[:, 1]
flight_tracks = flight_tracks.groupby('FID').head(20).reset_index(drop = True)
# multiple tracks must have the same length for now
tracks = flight_tracks[['Lat', 'Lon', 'Alt', 'cumDT', 'Lat_spd', 'Lon_spd']].values.astype(np.float32)
# print('Using Elap_Time_Diff as time inputs')
# subtract depature's lat lon & course
# normalize tracks using train mean and train std
tracks = self.normalize_flight_tracks(tracks)
# seq_length = flight_tracks.groupby('FID').Lat.count().values.astype(np.int32)
seq_length = self._count_unordered_seq_length(flight_tracks.FID.values)
tracks_split = np.split(tracks, np.cumsum(seq_length))[:-1]
tracks_split = np.array(tracks_split)
# flight plans
fp_tracks = self.ori_flight_plans[['LATITUDE', 'LONGITUDE']].values.astype(np.float32)
# first substract from the lat lon of departure airport
# then normalize using the training set mean and std
fp_tracks = (fp_tracks - np.array([self.dep_lat, self.dep_lon]) - self.train_fp_mean)/self.train_fp_std
fp_seq_length = self._count_unordered_seq_length(self.ori_flight_plans.FLT_PLAN_ID.values)
# pad and flip
fp_tracks_split = _pad_and_flip_FP(np.array(np.split(fp_tracks, np.cumsum(fp_seq_length))[:-1]), fp_seq_length)
return fp_tracks_split, tracks_split, fp_seq_length, seq_length, flight_tracks
def normalize_flight_tracks(self,
unnormalized_tracks):
return (unnormalized_tracks - np.array([self.dep_lat, self.dep_lon, 0, 0, 0, 0]) - self.train_track_mean)/self.train_track_std
def unnormalize_flight_tracks(self,
normalized_tracks):
return normalized_tracks * self.train_track_std + self.train_track_mean + np.array([self.dep_lat, self.dep_lon, 0, 0, 0, 0])
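    # Sanity-check sketch (hypothetical input x of shape [n, 6]): the two
    # transforms above are exact inverses, so
    #   np.allclose(self.unnormalize_flight_tracks(self.normalize_flight_tracks(x)), x)
    # should hold.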
def unnormalize_flight_track_cov(self,
normalize_flight_track_cov):
return normalize_flight_track_cov * (self.train_track_std[[0, 1, 2, 4, 5]] ** 2)
def normalize_feature_cubes(self,
unnormalized_feature_cubes):
return (unnormalized_feature_cubes - self.train_feature_cubes_mean)/self.train_feature_cubes_std
# @Override parent method _generate_feature_cube
def _generate_feature_cube(self,
flight_tracks,
feature_grid_query_idx,
nx,
ny,
wx_alt_buffer = 20):
"""
Given the flight track data (with agumented columns), generate wind and tempr cube for each track point
use groupby function to speed up
return a numpy array (tensor) with shape [None, 20, 20, 4]
first layer: ncwf weather
second layer: temperature
third layer: u wind
fourth layer: v wind
"""
feature_cubes = np.zeros(shape = (feature_grid_query_idx.shape[0], nx, ny, 4), dtype = np.float32)
#######################################################################################################
self.wx_testdata_holder = []
self.wx_gpidx_holder = []
if not self.weather_feature:
feature_cubes[:, :, :, 0] = 0
else:
# append all weather data into one so that later matching will be more efficient
groups = flight_tracks[['FID', 'wx_fname', 'wx_alt']].groupby(['wx_fname', 'wx_alt'])
ng = groups.ngroups
print('Extract ncwf convective weather from %d groups ...'%ng)
for gpidx, gp in groups:
if gpidx[0] not in self.wx_gpidx_holder:
self.wx_gpidx_holder.append(gpidx[0])
wx_data_single = self._load_ncwf_low_memory(gpidx[0])
self.wx_testdata_holder.append(wx_data_single) # each element is the ncwf array with the order of wx_fname
else:
wx_data_single = self.wx_testdata_holder[self.wx_gpidx_holder.index(gpidx[0])]
# nan has been automatically dropped
wx_alt_cover = self.wx_unique_alt[(self.wx_unique_alt >= (gpidx[1] - wx_alt_buffer)) & \
(self.wx_unique_alt <= (gpidx[1] + wx_alt_buffer))]
wx_alt_idxmin = self.wx_alt_dict[wx_alt_cover.min()]
wx_alt_idxmax = self.wx_alt_dict[wx_alt_cover.max()] + 1
wx_base = np.any(wx_data_single[wx_alt_idxmin: wx_alt_idxmax, :][:, feature_grid_query_idx[gp.index]], axis = 0).astype(np.float32).reshape(-1, nx, ny)
feature_cubes[gp.index, :, :, 0] = wx_base
print('Finished ncwf wx extraction!\n')
#######################################################################################################
groups = flight_tracks[['FID', 'wind_fname', 'levels']].groupby(['wind_fname', 'levels'])
ng = groups.ngroups
self.uwind_testdata_holder = []
self.vwind_testdata_holder = []
self.tempr_testdata_holder = []
self.wind_gpidx_holder = []
print('Extract wind/ temperature from %d groups ...'%ng)
jj = -1
for gpidx, gp in groups:
jj += 1
if self.large_load:
wind_npz = np.load(os.path.join(self.wind_data_rootdir, gpidx[0]))
tmp_uwind = wind_npz['uwind']
tmp_vwind = wind_npz['vwind']
tmp_tempr = wind_npz['tempr']
else:
if gpidx[0] not in self.wind_gpidx_holder:
self.wind_gpidx_holder.append(gpidx[0])
tmp_uwind, tmp_vwind, tmp_tempr = self._load_wind_low_memory(gpidx[0])
self.tempr_testdata_holder.append(tmp_tempr)
self.uwind_testdata_holder.append(tmp_uwind)
self.vwind_testdata_holder.append(tmp_vwind)
else:
tmp_tempr = self.tempr_testdata_holder[self.wind_gpidx_holder.index(gpidx[0])]
tmp_uwind = self.uwind_testdata_holder[self.wind_gpidx_holder.index(gpidx[0])]
tmp_vwind = self.vwind_testdata_holder[self.wind_gpidx_holder.index(gpidx[0])]
tempr_base = tmp_tempr[self.lvls_dict[gpidx[1]]][feature_grid_query_idx[gp.index]].reshape(-1, nx,ny)
uwind_base = tmp_uwind[self.lvls_dict[gpidx[1]]][feature_grid_query_idx[gp.index]].reshape(-1, nx,ny)
vwind_base = tmp_vwind[self.lvls_dict[gpidx[1]]][feature_grid_query_idx[gp.index]].reshape(-1, nx,ny)
feature_cubes[gp.index, :, :, 1] = tempr_base
feature_cubes[gp.index, :, :, 2] = uwind_base
feature_cubes[gp.index, :, :, 3] = vwind_base
print('Finished wind/ temperature extraction!\n')
return feature_cubes
def _load_ncwf_low_memory(self, ncwf_fname):
return np.load(os.path.join(self.ncwf_data_rootdir, ncwf_fname))['ncwf_arr']
def generate_test_track_feature_cubes(self,
flight_tracks,
shift_xleft = 0,
shift_xright = 2,
shift_yup = 1,
shift_ydown = 1,
nx = 20,
ny = 20):
feature_cubes, feature_grid, query_idx = self.feature_arr_generator(flight_tracks = flight_tracks,
shift_xleft = shift_xleft,
shift_xright = shift_xright,
shift_yup = shift_yup,
shift_ydown = shift_ydown,
nx = nx,
ny = ny)
# feature_grid = feature_grid - np.array([self.dep_lon, self.dep_lat])
# feature_grid = feature_grid.reshape(-1, 20, 20, 2)
# feature_cubes = np.concatenate((feature_cubes, feature_grid), axis = -1)
feature_cubes = self.normalize_feature_cubes(feature_cubes)
return feature_cubes, feature_grid, query_idx
def generate_predicted_pnt_feature_cube(self,
predicted_final_track,
known_flight_deptime,
shift_xleft = 0,
shift_xright = 2,
shift_yup = 1,
shift_ydown = 1,
nx = 20,
ny = 20):
"""
predicted_final_track has the shape of [n_seq * n_mixture^i, n_time + t, n_input].
The last axis coresponds to [Lat, Lon, Alt, cumDT, Speed, course]
known_flight_deptime is a np array that contains
FID, Elap_Time (depature time)
wind_file_info is a dictionary of file time tree (kdtree) and an array of time objects
"""
predicted_final_track = self.unnormalize_flight_tracks(predicted_final_track[:, -2:, :])
# print(predicted_final_track[0, -1, :4])
azimuth_arr = g.inv(predicted_final_track[:, -2, 1],
predicted_final_track[:, -2, 0],
predicted_final_track[:, -1, 1],
predicted_final_track[:, -1, 0])[0]
# Step 0: construct tmp matching dataframe that contains:
# elap_time_diff, azimuth, levels, wx_alt, wind_fname, wx_fname
predicted_matched_info = np.empty((predicted_final_track.shape[0], 13))
predicted_matched_info = pd.DataFrame(predicted_matched_info,
columns = ['FID',
'Lat',
'Lon',
'Alt',
'cumDT',
'Lat_spd',
'Lon_spd',
'Elap_Time_Diff',
'azimuth',
'levels',
'wx_alt',
'wind_fname',
'wx_fname'])
predicted_matched_info.loc[:, ['Lat',
'Lon',
'Alt',
'cumDT',
'Lat_spd',
'Lon_spd']] = predicted_final_track[:, -1, :]
predicted_matched_info.loc[:, 'azimuth'] = azimuth_arr * np.pi/180
# Step 1: map cumDT to Elaps_time
known_flight_deptime_diff = (known_flight_deptime[:, 1] - baseline_time)
known_flight_deptime_diff = np.array([item.total_seconds() for item in known_flight_deptime_diff])
multiplier = predicted_matched_info.shape[0]//known_flight_deptime_diff.shape[0]
deptime = np.repeat(known_flight_deptime_diff, repeats = multiplier, axis = 0)
FIDs = np.repeat(known_flight_deptime[:, 0], repeats = multiplier, axis = 0)
elap_time_diff = predicted_matched_info.loc[:, 'cumDT'].values + deptime
predicted_matched_info.loc[:, 'Elap_Time_Diff'] = elap_time_diff
predicted_matched_info.loc[:, 'FID'] = FIDs
# Step 2: Map Elaps_time with wx_fname and wind_fname
# match with wind/ temperature fname
wind_query_dist, wind_query_idx = self.wind_ftime_tree.query(elap_time_diff.reshape(-1, 1), p = 1, distance_upper_bound = 3600*3)
wind_valid_query = wind_query_idx < self.wind_time_objs.shape[0]  # boolean mask; the KDTree marks misses with idx == n (and dist == inf), so compare the index, not the distance
predicted_matched_info.loc[wind_valid_query, 'wind_fname'] = self.wind_time_objs[wind_query_idx[wind_valid_query], 0]
predicted_matched_info.loc[~wind_valid_query, 'wind_fname'] = np.nan
# match with ncwf idx
wx_query_dist, wx_query_idx = self.wx_ftime_tree.query(elap_time_diff.reshape(-1, 1), p = 1, distance_upper_bound = 3600)
wx_valid_query = wx_query_idx < self.wx_fname_hourly.shape[0]  # boolean mask for valid ncwf matches (idx == n means no file within the time bound)
predicted_matched_info.loc[wx_valid_query, 'wx_fname'] = self.wx_fname_hourly[wx_query_idx[wx_valid_query]]
predicted_matched_info.loc[~wx_valid_query, 'wx_fname'] = np.nan
# Step 3: calculate wind_levels & ncwf_levels
predicted_matched_info.loc[:, 'levels'] = predicted_matched_info['Alt'].apply(lambda x: proxilvl(x*100, self.lvls_dict))
predicted_matched_info.loc[:, 'wx_alt'] = predicted_matched_info['Alt']//10
# Step 4: generate feature cube
feature_cubes, feature_grid, _ = self.feature_arr_generator(flight_tracks = predicted_matched_info,
shift_xleft = shift_xleft,
shift_xright = shift_xright,
shift_yup = shift_yup,
shift_ydown = shift_ydown,
nx = nx,
ny = ny)
# feature_grid = feature_grid - np.array([self.dep_lon, self.dep_lat])
# feature_grid = feature_grid.reshape(-1, 20, 20, 2)
# feature_cubes = np.concatenate((feature_cubes, feature_grid), axis = -1)
feature_cubes = self.normalize_feature_cubes(feature_cubes)
feature_cubes = feature_cubes.reshape(-1, 1, nx, ny, 4)
return feature_cubes, feature_grid, predicted_matched_info
def reshape_feature_cubes(self,
feature_cubes,
track_length):
# track_length should be a list of integers that contains the length of each test track
feature_cubes = np.array(np.split(feature_cubes, np.cumsum(track_length))[:-1])
return feature_cubes
def _pad(inputs, inputs_len):
    # inputs: list of np arrays; inputs_len: np array of per-sequence lengths
    _zero_placeholder = ((0, 0),) * (len(inputs[0].shape) - 1)
    max_len = inputs_len.max()
    _inputs = []
    for _input, _len in zip(inputs, inputs_len):
        _tmp_zeros = ((0, max_len - _len), *_zero_placeholder)
        _inputs.append(np.pad(_input, _tmp_zeros, 'constant', constant_values=0))
    return np.asarray(_inputs)
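# Usage sketch (hypothetical arrays): two tracks of lengths 2 and 3 padded
# into one [2, 3, 6] batch tensor:
#   _pad([np.zeros((2, 6)), np.zeros((3, 6))], np.array([2, 3])).shape
#   -> (2, 3, 6)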
def _pad_and_flip_FP(inputs, inputs_len):
    # reverse every flight plan, then zero-pad to the longest plan
    max_len = inputs_len.max()
    _inputs = []
    for _input, _len in zip(inputs, inputs_len):
        _inputs.append(np.pad(_input.reshape(-1, 2)[::-1],
                              ((0, max_len - _len), (0, 0)),
                              'constant', constant_values=0))
    return np.asarray(_inputs)
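# Behavior sketch (hypothetical plan of 3 waypoints, max_len 4): the (lat,
# lon) pairs are reversed so the plan reads arrival-first, then zero-padded
# at the tail to shape [4, 2].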
import numpy as np
import tensorflow as tf
import os
from configparser import ConfigParser
import matplotlib.pyplot as plt
class visual_graph:
def __init__(self, conf_path, restored_model_path):
self.restored_model_path = restored_model_path
self.conf_path = conf_path
self.load_configs()
def load_configs(self):
parser = ConfigParser(os.environ)
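        # Passing os.environ as the defaults mapping merges environment
        # variables into ConfigParser's DEFAULT section, so option lookups
        # can fall back to (and interpolate) environment values.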
parser.read(self.conf_path)
config_header = 'nn'
self.n_input = parser.getint(config_header, 'n_input')
self.n_channels = parser.getint('convolution', 'n_channels')
self.n_controled_var = parser.getint('lstm', 'n_controled_var')
self.n_encode = parser.getint(config_header, 'n_encode')
self.state_size = parser.getint('lstm', 'n_cell_dim')
self.n_layer = parser.getint('lstm', 'n_lstm_layers')
self.batch_size = parser.getint(config_header, 'batch_size')
def define_placeholder(self):
self.input_encode_tensor = tf.placeholder(dtype=tf.float32, shape=[None, None, self.n_encode], name='encode_tensor')
self.seq_len_encode = tf.placeholder(dtype=tf.int32, shape=[None], name='seq_length_encode')
self.input_tensor = tf.placeholder(dtype=tf.float32, shape=[None, None, self.n_input, self.n_input, self.n_channels], name='decode_feature_map')
self.input_decode_coords_tensor = tf.placeholder(dtype=tf.float32, shape=[None, None, self.n_controled_var], name='decode_coords')
self.target = tf.placeholder(dtype=tf.float32, shape=[None, None, self.n_controled_var], name='target')
self.target_end = tf.placeholder(dtype=tf.float32, shape=[None, None, 1], name='target_end')
self.target_end_neg = tf.placeholder(dtype=tf.float32, shape=[None, None, 1], name='target_end_neg')
self.seq_length = tf.placeholder(dtype=tf.int32, shape=[None], name='seq_length_decode')
return
def launchGraph(self):
self.define_placeholder()
        # LSTM_model is assumed to be provided by the project's model module;
        # it is not imported in this cell.
        self.MODEL = LSTM_model(conf_path=self.conf_path,
                                batch_x=self.input_encode_tensor,
                                seq_length=self.seq_len_encode,
                                n_input=self.n_encode,
                                batch_x_decode=self.input_tensor,
                                batch_xcoords_decode=self.input_decode_coords_tensor,
                                seq_length_decode=self.seq_length,
                                n_input_decode=self.n_input,
                                target=self.target,
                                train=False,
                                weight_summary=False)
return
def feed_fwd_convlayer(self, feed_input):
with tf.device('/cpu:0'):
self.graph = tf.Graph()
self.launchGraph()
self.sess = tf.Session()
self.saver = tf.train.Saver()
self.saver.restore(self.sess, self.restored_model_path)
self.sess.graph.finalize()
self.weights = self._return_weights()
conv1_out, conv2_out, conv3_out = self._feed_fwd_convlayer(feed_input)
self.sess.close()
return (conv1_out, conv2_out, conv3_out)
def _return_weights(self):
weight_list = tf.trainable_variables()
weights = {}
for v in weight_list:
weights[v.name] = self.sess.run(v)
return weights
def _feed_fwd_convlayer(self, feed_input):
conv1_out = self.sess.run(self.MODEL.conv1, feed_dict={self.input_tensor: feed_input})
conv2_out = self.sess.run(self.MODEL.conv2, feed_dict={self.input_tensor: feed_input})
conv3_out = self.sess.run(self.MODEL.conv3, feed_dict={self.input_tensor: feed_input})
return (conv1_out, conv2_out, conv3_out)
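# Note (design): the three conv activations above could be fetched in a single
# sess.run([self.MODEL.conv1, self.MODEL.conv2, self.MODEL.conv3], feed_dict=...)
# call, avoiding three separate forward passes over the same input.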
def visualize_raw_weights(weight_var, fig_size=(8, 4)):
n_layers = weight_var.shape[3]
n_channels = weight_var.shape[2]
fig, axs = plt.subplots(n_channels, n_layers, figsize=fig_size, facecolor='w', edgecolor='k')
axs = axs.ravel()
for i in range(n_channels):
for j in range(n_layers):
axs[n_layers * i + j].imshow(weight_var[:, :, i, j], cmap='bwr', vmax=weight_var.max(), vmin=weight_var.min())
axs[n_layers * i + j].set_axis_off()
plt.show()
return fig
def visualize_conv_layers(conv_layer, nrow, ncol, fig_size):
print(conv_layer.shape)
fig, axs = plt.subplots(nrow, ncol, figsize=fig_size, facecolor='w', edgecolor='k')
fig.subplots_adjust(wspace=0.01, hspace=0.01)
axs = axs.ravel()
for i in range(nrow):
for j in range(ncol):
axs[ncol * i + j].imshow(conv_layer[j, :, :, i], cmap='bwr', vmax=conv_layer[:, :, :, i].max(), vmin=conv_layer[:, :, :, i].min(), origin='lower')
axs[ncol * i + j].set_axis_off()
plt.show()
return fig
"""
Example Code:

tf.reset_default_graph()
restored_model_path = 'visual_network/model.ckpt-99'
config_path = 'configs/encoder_decoder_nn.ini'
visual_graph_class = visual_graph(config_path, restored_model_path)
visual_graph_class.restore_model()
weights = visual_graph_class.weights
visualize_raw_weights(weight_var=weights['wc1:0'], fig_size=(8, 2))
visualize_raw_weights(weight_var=weights['wc2:0'], fig_size=(8, 4))
visualize_raw_weights(weight_var=weights['wc3:0'], fig_size=(8, 4))
""" | code |
32062775/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train = pd.read_csv('/kaggle/input/titanic/train.csv')
sex_and_embark_train = pd.get_dummies(train[['Sex', 'Embarked']])
sex_and_embark_test = pd.get_dummies(test[['Sex', 'Embarked']])
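# get_dummies one-hot encodes the categoricals, e.g. 'Sex' becomes the
# indicator columns 'Sex_female' and 'Sex_male'.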
train = train.drop(['Sex', 'Embarked'], axis=1)
test = test.drop(['Sex', 'Embarked'], axis=1)
train = train.join(sex_and_embark_train)
test = test.join(sex_and_embark_test)
test.fillna(test.mean(), inplace=True)  # note: relies on older pandas, where DataFrame.mean() silently skips non-numeric columns
test | code |
32062775/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train = pd.read_csv('/kaggle/input/titanic/train.csv')
gender_submission | code |
32062775/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test | code |
32062775/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train = pd.read_csv('/kaggle/input/titanic/train.csv')
sex_and_embark_train = pd.get_dummies(train[['Sex', 'Embarked']])
sex_and_embark_test = pd.get_dummies(test[['Sex', 'Embarked']])
train = train.drop(['Sex', 'Embarked'], axis=1)
test = test.drop(['Sex', 'Embarked'], axis=1)
train = train.join(sex_and_embark_train)
test = test.join(sex_and_embark_test)
train.fillna(train.mean(), inplace=True)
train | code |
32062775/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train = pd.read_csv('/kaggle/input/titanic/train.csv')
sex_and_embark_train = pd.get_dummies(train[['Sex', 'Embarked']])
sex_and_embark_test = pd.get_dummies(test[['Sex', 'Embarked']])
train = train.drop(['Sex', 'Embarked'], axis=1)
test = test.drop(['Sex', 'Embarked'], axis=1)
train = train.join(sex_and_embark_train)
test = test.join(sex_and_embark_test)
train.fillna(train.mean(), inplace=True)
X_train = train.drop(['Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
y_train = train['Survived']
y_train | code |
32062775/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32062775/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train = pd.read_csv('/kaggle/input/titanic/train.csv')
sex_and_embark_train = pd.get_dummies(train[['Sex', 'Embarked']])
sex_and_embark_test = pd.get_dummies(test[['Sex', 'Embarked']])
train = train.drop(['Sex', 'Embarked'], axis=1)
test = test.drop(['Sex', 'Embarked'], axis=1)
train = train.join(sex_and_embark_train)
test = test.join(sex_and_embark_test)
test.fillna(test.mean(), inplace=True)
X_test = test.drop(['Name', 'Ticket', 'Cabin'], axis=1)
y_test = gender_submission['Survived']
y_test | code |
32062775/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train = pd.read_csv('/kaggle/input/titanic/train.csv')
sex_and_embark_train = pd.get_dummies(train[['Sex', 'Embarked']])
sex_and_embark_test = pd.get_dummies(test[['Sex', 'Embarked']])
train = train.drop(['Sex', 'Embarked'], axis=1)
test = test.drop(['Sex', 'Embarked'], axis=1)
train = train.join(sex_and_embark_train)
test = test.join(sex_and_embark_test)
train.fillna(train.mean(), inplace=True)
X_train = train.drop(['Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
y_train = train['Survived']
X_train.info() | code |
32062775/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train = pd.read_csv('/kaggle/input/titanic/train.csv')
sex_and_embark_train = pd.get_dummies(train[['Sex', 'Embarked']])
sex_and_embark_test = pd.get_dummies(test[['Sex', 'Embarked']])
train = train.drop(['Sex', 'Embarked'], axis=1)
test = test.drop(['Sex', 'Embarked'], axis=1)
train = train.join(sex_and_embark_train)
test = test.join(sex_and_embark_test)
test.fillna(test.mean(), inplace=True)
X_test = test.drop(['Name', 'Ticket', 'Cabin'], axis=1)
y_test = gender_submission['Survived']
X_test.info() | code |
32062775/cell_24 | [
"text_plain_output_1.png"
] | from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train = pd.read_csv('/kaggle/input/titanic/train.csv')
sex_and_embark_train = pd.get_dummies(train[['Sex', 'Embarked']])
sex_and_embark_test = pd.get_dummies(test[['Sex', 'Embarked']])
train = train.drop(['Sex', 'Embarked'], axis=1)
test = test.drop(['Sex', 'Embarked'], axis=1)
train = train.join(sex_and_embark_train)
test = test.join(sex_and_embark_test)
train.fillna(train.mean(), inplace=True)
test.fillna(test.mean(), inplace=True)
X_train = train.drop(['Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
y_train = train['Survived']
X_test = test.drop(['Name', 'Ticket', 'Cabin'], axis=1)
y_test = gender_submission['Survived']
steps = [('scaler', StandardScaler()), ('SVM', SVC())]
pipeline = Pipeline(steps)
parameters = {'SVM__C': [1, 10, 100], 'SVM__gamma': [0.1, 0.01]}
cv = GridSearchCV(pipeline, parameters, cv=3)
cv.fit(X_train, y_train)
y_pred = cv.predict(X_test)
my_submission = pd.DataFrame({'PassengerId': X_test.PassengerId, 'Survived': y_pred})
my_submission | code |
32062775/cell_22 | [
"text_plain_output_1.png"
] | from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train = pd.read_csv('/kaggle/input/titanic/train.csv')
sex_and_embark_train = pd.get_dummies(train[['Sex', 'Embarked']])
sex_and_embark_test = pd.get_dummies(test[['Sex', 'Embarked']])
train = train.drop(['Sex', 'Embarked'], axis=1)
test = test.drop(['Sex', 'Embarked'], axis=1)
train = train.join(sex_and_embark_train)
test = test.join(sex_and_embark_test)
train.fillna(train.mean(), inplace=True)
test.fillna(test.mean(), inplace=True)
X_train = train.drop(['Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
y_train = train['Survived']
X_test = test.drop(['Name', 'Ticket', 'Cabin'], axis=1)
y_test = gender_submission['Survived']
steps = [('scaler', StandardScaler()), ('SVM', SVC())]
pipeline = Pipeline(steps)
parameters = {'SVM__C': [1, 10, 100], 'SVM__gamma': [0.1, 0.01]}
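# The 'SVM__C' / 'SVM__gamma' keys follow sklearn's '<step name>__<param>'
# convention for addressing parameters of a named Pipeline step.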
cv = GridSearchCV(pipeline, parameters, cv=3)
cv.fit(X_train, y_train)
y_pred = cv.predict(X_test)
print('Accuracy: {}'.format(cv.score(X_test, y_test)))
print(classification_report(y_test, y_pred))
print('Tuned Model Parameters: {}'.format(cv.best_params_)) | code |
32062775/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train = pd.read_csv('/kaggle/input/titanic/train.csv')
train | code |
1008349/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
gTemp = pd.read_csv('../input/GlobalTemperatures.csv')
gTempCountry = pd.read_csv('../input/GlobalLandTemperaturesByCountry.csv')
gTempState = pd.read_csv('../input/GlobalLandTemperaturesByState.csv')
gTempMajorCity = pd.read_csv('../input/GlobalLandTemperaturesByMajorCity.csv')
gTempCity = pd.read_csv('../input/GlobalLandTemperaturesByCity.csv')
gTemp.head(5) | code |
1008349/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
gTemp = pd.read_csv('../input/GlobalTemperatures.csv')
gTempCountry = pd.read_csv('../input/GlobalLandTemperaturesByCountry.csv')
gTempState = pd.read_csv('../input/GlobalLandTemperaturesByState.csv')
gTempMajorCity = pd.read_csv('../input/GlobalLandTemperaturesByMajorCity.csv')
gTempCity = pd.read_csv('../input/GlobalLandTemperaturesByCity.csv')
gTemp['Year'] | code |
320942/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv', header=0)
df[df['Age'] > 60][['Sex', 'Pclass', 'Age', 'Survived']] | code |
320942/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv', header=0)
df['Gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)
df.head(3) | code |
320942/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv', header=0)
df.head(3) | code |
320942/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
320942/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv', header=0)
df['Gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)
df.head(3) | code |
320942/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv', header=0)
df.info() | code |
320942/cell_5 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pylab as P
df = pd.read_csv('../input/train.csv', header=0)
import pylab as P
df['Age'].hist()
P.show() | code |
50223292/cell_9 | [
"text_plain_output_100.png",
"text_plain_output_334.png",
"text_plain_output_770.png",
"text_plain_output_743.png",
"text_plain_output_673.png",
"text_plain_output_445.png",
"text_plain_output_640.png",
"text_plain_output_201.png",
"text_plain_output_586.png",
"text_plain_output_261.png",
"text_plain_output_775.png",
"text_plain_output_565.png",
"text_plain_output_522.png",
"text_plain_output_84.png",
"text_plain_output_624.png",
"text_plain_output_521.png",
"text_plain_output_322.png",
"text_plain_output_769.png",
"text_plain_output_205.png",
"text_plain_output_693.png",
"text_plain_output_511.png",
"text_plain_output_608.png",
"text_plain_output_271.png",
"text_plain_output_56.png",
"text_plain_output_475.png",
"text_plain_output_158.png",
"text_plain_output_455.png",
"text_plain_output_223.png",
"text_plain_output_218.png",
"text_plain_output_264.png",
"text_plain_output_715.png",
"text_plain_output_282.png",
"text_plain_output_579.png",
"text_plain_output_629.png",
"text_plain_output_396.png",
"text_plain_output_287.png",
"text_plain_output_232.png",
"text_plain_output_181.png",
"text_plain_output_137.png",
"text_plain_output_139.png",
"text_plain_output_362.png",
"text_plain_output_35.png",
"text_plain_output_697.png",
"text_plain_output_501.png",
"text_plain_output_593.png",
"text_plain_output_258.png",
"text_plain_output_685.png",
"text_plain_output_452.png",
"text_plain_output_130.png",
"text_plain_output_598.png",
"text_plain_output_490.png",
"text_plain_output_449.png",
"text_plain_output_462.png",
"text_plain_output_117.png",
"text_plain_output_286.png",
"text_plain_output_367.png",
"text_plain_output_750.png",
"text_plain_output_262.png",
"text_plain_output_278.png",
"text_plain_output_588.png",
"text_plain_output_395.png",
"text_plain_output_617.png",
"text_plain_output_254.png",
"text_plain_output_307.png",
"text_plain_output_570.png",
"text_plain_output_674.png",
"text_plain_output_98.png",
"text_plain_output_399.png",
"text_plain_output_671.png",
"text_plain_output_718.png",
"text_plain_output_236.png",
"text_plain_output_195.png",
"text_plain_output_756.png",
"text_plain_output_678.png",
"text_plain_output_688.png",
"text_plain_output_471.png",
"text_plain_output_219.png",
"text_plain_output_614.png",
"text_plain_output_768.png",
"text_plain_output_420.png",
"text_plain_output_514.png",
"text_plain_output_485.png",
"text_plain_output_237.png",
"text_plain_output_43.png",
"text_plain_output_284.png",
"text_plain_output_187.png",
"text_plain_output_309.png",
"text_plain_output_576.png",
"text_plain_output_78.png",
"text_plain_output_143.png",
"text_plain_output_106.png",
"text_plain_output_37.png",
"text_plain_output_138.png",
"text_plain_output_670.png",
"text_plain_output_544.png",
"text_plain_output_192.png",
"text_plain_output_426.png",
"text_plain_output_184.png",
"text_plain_output_477.png",
"text_plain_output_274.png",
"text_plain_output_172.png",
"text_plain_output_664.png",
"text_plain_output_716.png",
"text_plain_output_627.png",
"text_plain_output_613.png",
"text_plain_output_736.png",
"text_plain_output_332.png",
"text_plain_output_147.png",
"text_plain_output_443.png",
"text_plain_output_327.png",
"text_plain_output_684.png",
"text_plain_output_774.png",
"text_plain_output_256.png",
"text_plain_output_90.png",
"text_plain_output_79.png",
"text_plain_output_331.png",
"text_plain_output_5.png",
"text_plain_output_642.png",
"text_plain_output_550.png",
"text_plain_output_75.png",
"text_plain_output_48.png",
"text_plain_output_388.png",
"text_plain_output_422.png",
"text_plain_output_116.png",
"text_plain_output_128.png",
"text_plain_output_30.png",
"text_plain_output_167.png",
"text_plain_output_213.png",
"text_plain_output_73.png",
"text_plain_output_126.png",
"text_plain_output_676.png",
"text_plain_output_704.png",
"text_plain_output_687.png",
"text_plain_output_776.png",
"text_plain_output_492.png",
"text_plain_output_321.png",
"text_plain_output_272.png",
"text_plain_output_115.png",
"text_plain_output_748.png",
"text_plain_output_474.png",
"text_plain_output_407.png",
"text_plain_output_482.png",
"text_plain_output_316.png",
"text_plain_output_634.png",
"text_plain_output_656.png",
"text_plain_output_355.png",
"text_plain_output_15.png",
"text_plain_output_390.png",
"text_plain_output_133.png",
"text_plain_output_771.png",
"text_plain_output_651.png",
"text_plain_output_437.png",
"text_plain_output_198.png",
"text_plain_output_699.png",
"text_plain_output_387.png",
"text_plain_output_555.png",
"text_plain_output_548.png",
"text_plain_output_759.png",
"text_plain_output_178.png",
"text_plain_output_226.png",
"text_plain_output_154.png",
"text_plain_output_234.png",
"text_plain_output_375.png",
"text_plain_output_404.png",
"text_plain_output_114.png",
"text_plain_output_659.png",
"text_plain_output_515.png",
"text_plain_output_157.png",
"text_plain_output_773.png",
"text_plain_output_494.png",
"text_plain_output_317.png",
"text_plain_output_251.png",
"text_plain_output_470.png",
"text_plain_output_496.png",
"text_plain_output_423.png",
"text_plain_output_70.png",
"text_plain_output_9.png",
"text_plain_output_712.png",
"text_plain_output_484.png",
"text_plain_output_44.png",
"text_plain_output_633.png",
"text_plain_output_325.png",
"text_plain_output_203.png",
"text_plain_output_603.png",
"text_plain_output_655.png",
"text_plain_output_119.png",
"text_plain_output_546.png",
"text_plain_output_540.png",
"text_plain_output_373.png",
"text_plain_output_504.png",
"text_plain_output_86.png",
"text_plain_output_244.png",
"text_plain_output_741.png",
"text_plain_output_118.png",
"text_plain_output_551.png",
"text_plain_output_583.png",
"text_plain_output_131.png",
"text_plain_output_40.png",
"text_plain_output_343.png",
"text_plain_output_123.png",
"text_plain_output_74.png",
"text_plain_output_734.png",
"text_plain_output_190.png",
"text_plain_output_302.png",
"text_plain_output_604.png",
"text_plain_output_31.png",
"text_plain_output_340.png",
"text_plain_output_379.png",
"text_plain_output_281.png",
"text_plain_output_639.png",
"text_plain_output_20.png",
"text_plain_output_557.png",
"text_plain_output_273.png",
"text_plain_output_706.png",
"text_plain_output_263.png",
"text_plain_output_102.png",
"text_plain_output_229.png",
"text_plain_output_111.png",
"text_plain_output_686.png",
"text_plain_output_753.png",
"text_plain_output_669.png",
"text_plain_output_414.png",
"text_plain_output_461.png",
"text_plain_output_510.png",
"text_plain_output_222.png",
"text_plain_output_589.png",
"text_plain_output_101.png",
"text_plain_output_530.png",
"text_plain_output_169.png",
"text_plain_output_531.png",
"text_plain_output_144.png",
"text_plain_output_161.png",
"text_plain_output_489.png",
"text_plain_output_305.png",
"text_plain_output_275.png",
"text_plain_output_779.png",
"text_plain_output_725.png",
"text_plain_output_301.png",
"text_plain_output_132.png",
"text_plain_output_60.png",
"text_plain_output_691.png",
"text_plain_output_764.png",
"text_plain_output_467.png",
"text_plain_output_502.png",
"text_plain_output_221.png",
"text_plain_output_596.png",
"text_plain_output_564.png",
"text_plain_output_552.png",
"text_plain_output_720.png",
"text_plain_output_654.png",
"text_plain_output_330.png",
"text_plain_output_155.png",
"text_plain_output_638.png",
"text_plain_output_434.png",
"text_plain_output_68.png",
"text_plain_output_4.png",
"text_plain_output_65.png",
"text_plain_output_618.png",
"text_plain_output_64.png",
"text_plain_output_419.png",
"text_plain_output_215.png",
"text_plain_output_532.png",
"text_plain_output_189.png",
"text_plain_output_415.png",
"text_plain_output_637.png",
"text_plain_output_13.png",
"text_plain_output_200.png",
"text_plain_output_666.png",
"text_plain_output_107.png",
"text_plain_output_746.png",
"text_plain_output_567.png",
"text_plain_output_628.png",
"text_plain_output_398.png",
"text_plain_output_312.png",
"text_plain_output_248.png",
"text_plain_output_695.png",
"text_plain_output_318.png",
"text_plain_output_417.png",
"text_plain_output_707.png",
"text_plain_output_690.png",
"text_plain_output_52.png",
"text_plain_output_545.png",
"text_plain_output_393.png",
"text_plain_output_758.png",
"text_plain_output_572.png",
"text_plain_output_594.png",
"text_plain_output_66.png",
"text_plain_output_446.png",
"text_plain_output_243.png",
"text_plain_output_611.png",
"text_plain_output_45.png",
"text_plain_output_380.png",
"text_plain_output_599.png",
"text_plain_output_692.png",
"text_plain_output_442.png",
"text_plain_output_665.png",
"text_plain_output_300.png",
"text_plain_output_660.png",
"text_plain_output_257.png",
"text_plain_output_405.png",
"text_plain_output_353.png",
"text_plain_output_476.png",
"text_plain_output_277.png",
"text_plain_output_457.png",
"text_plain_output_739.png",
"text_plain_output_740.png",
"text_plain_output_361.png",
"text_plain_output_171.png",
"text_plain_output_518.png",
"text_plain_output_561.png",
"text_plain_output_431.png",
"text_plain_output_14.png",
"text_plain_output_159.png",
"text_plain_output_32.png",
"text_plain_output_516.png",
"text_plain_output_304.png",
"text_plain_output_88.png",
"text_plain_output_240.png",
"text_plain_output_713.png",
"text_plain_output_29.png",
"text_plain_output_359.png",
"text_plain_output_529.png",
"text_plain_output_347.png",
"text_plain_output_140.png",
"text_plain_output_606.png",
"text_plain_output_763.png",
"text_plain_output_376.png",
"text_plain_output_280.png",
"text_plain_output_783.png",
"text_plain_output_129.png",
"text_plain_output_728.png",
"text_plain_output_349.png",
"text_plain_output_242.png",
"text_plain_output_483.png",
"text_plain_output_460.png",
"text_plain_output_363.png",
"text_plain_output_289.png",
"text_plain_output_255.png",
"text_plain_output_160.png",
"text_plain_output_58.png",
"text_plain_output_680.png",
"text_plain_output_622.png",
"text_plain_output_329.png",
"text_plain_output_49.png",
"text_plain_output_708.png",
"text_plain_output_63.png",
"text_plain_output_260.png",
"text_plain_output_294.png",
"text_plain_output_27.png",
"text_plain_output_392.png",
"text_plain_output_320.png",
"text_plain_output_177.png",
"text_plain_output_607.png",
"text_plain_output_386.png",
"text_plain_output_438.png",
"text_plain_output_76.png",
"text_plain_output_681.png",
"text_plain_output_333.png",
"text_plain_output_108.png",
"text_plain_output_581.png",
"text_plain_output_54.png",
"text_plain_output_142.png",
"text_plain_output_10.png",
"text_plain_output_700.png",
"text_plain_output_269.png",
"text_plain_output_276.png",
"text_plain_output_6.png",
"text_plain_output_326.png",
"text_plain_output_744.png",
"text_plain_output_503.png",
"text_plain_output_578.png",
"text_plain_output_735.png",
"text_plain_output_153.png",
"text_plain_output_170.png",
"text_plain_output_92.png",
"text_plain_output_658.png",
"text_plain_output_57.png",
"text_plain_output_120.png",
"text_plain_output_469.png",
"text_plain_output_24.png",
"text_plain_output_357.png",
"text_plain_output_21.png",
"text_plain_output_344.png",
"text_plain_output_104.png",
"text_plain_output_784.png",
"text_plain_output_270.png",
"text_plain_output_47.png",
"text_plain_output_623.png",
"text_plain_output_466.png",
"text_plain_output_568.png",
"text_plain_output_121.png",
"text_plain_output_25.png",
"text_plain_output_134.png",
"text_plain_output_523.png",
"text_plain_output_401.png",
"text_plain_output_77.png",
"text_plain_output_421.png",
"text_plain_output_288.png",
"text_plain_output_535.png",
"text_plain_output_527.png",
"text_plain_output_488.png",
"text_plain_output_18.png",
"text_plain_output_183.png",
"text_plain_output_266.png",
"text_plain_output_149.png",
"text_plain_output_208.png",
"text_plain_output_50.png",
"text_plain_output_36.png",
"text_plain_output_646.png",
"text_plain_output_383.png",
"text_plain_output_207.png",
"text_plain_output_766.png",
"text_plain_output_391.png",
"text_plain_output_413.png",
"text_plain_output_709.png",
"text_plain_output_96.png",
"text_plain_output_663.png",
"text_plain_output_87.png",
"text_plain_output_3.png",
"text_plain_output_217.png",
"text_plain_output_418.png",
"text_plain_output_657.png",
"text_plain_output_427.png",
"text_plain_output_180.png",
"text_plain_output_556.png",
"text_plain_output_141.png",
"text_plain_output_210.png",
"text_plain_output_112.png",
"text_plain_output_152.png",
"text_plain_output_225.png",
"text_plain_output_701.png",
"text_plain_output_191.png",
"text_plain_output_609.png",
"text_plain_output_737.png",
"text_plain_output_259.png",
"text_plain_output_447.png",
"text_plain_output_290.png",
"text_plain_output_506.png",
"text_plain_output_283.png",
"text_plain_output_495.png",
"text_plain_output_247.png",
"text_plain_output_113.png",
"text_plain_output_371.png",
"text_plain_output_479.png",
"text_plain_output_324.png",
"text_plain_output_22.png",
"text_plain_output_188.png",
"text_plain_output_366.png",
"text_plain_output_328.png",
"text_plain_output_81.png",
"text_plain_output_730.png",
"text_plain_output_69.png",
"text_plain_output_368.png",
"text_plain_output_667.png",
"text_plain_output_372.png",
"text_plain_output_175.png",
"text_plain_output_165.png",
"text_plain_output_767.png",
"text_plain_output_542.png",
"text_plain_output_146.png",
"text_plain_output_145.png",
"text_plain_output_125.png",
"text_plain_output_754.png",
"text_plain_output_454.png",
"text_plain_output_487.png",
"text_plain_output_595.png",
"text_plain_output_643.png",
"text_plain_output_338.png",
"text_plain_output_575.png",
"text_plain_output_197.png",
"text_plain_output_512.png",
"text_plain_output_777.png",
"text_plain_output_738.png",
"text_plain_output_382.png",
"text_plain_output_315.png",
"text_plain_output_429.png",
"text_plain_output_38.png",
"text_plain_output_517.png",
"text_plain_output_682.png",
"text_plain_output_433.png",
"text_plain_output_7.png",
"text_plain_output_528.png",
"text_plain_output_648.png",
"text_plain_output_214.png",
"text_plain_output_166.png",
"text_plain_output_358.png",
"text_plain_output_726.png",
"text_plain_output_513.png",
"text_plain_output_714.png",
"text_plain_output_314.png",
"text_plain_output_745.png",
"text_plain_output_592.png",
"text_plain_output_410.png",
"text_plain_output_432.png",
"text_plain_output_645.png",
"text_plain_output_411.png",
"text_plain_output_91.png",
"text_plain_output_308.png",
"text_plain_output_245.png",
"text_plain_output_16.png",
"text_plain_output_497.png",
"text_plain_output_174.png",
"text_plain_output_212.png",
"text_plain_output_652.png",
"text_plain_output_644.png",
"text_plain_output_230.png",
"text_plain_output_265.png",
"text_plain_output_430.png",
"text_plain_output_742.png",
"text_plain_output_630.png",
"text_plain_output_778.png",
"text_plain_output_435.png",
"text_plain_output_689.png",
"text_plain_output_378.png",
"text_plain_output_59.png",
"text_plain_output_580.png",
"text_plain_output_409.png",
"text_plain_output_206.png",
"text_plain_output_103.png",
"text_plain_output_71.png",
"text_plain_output_732.png",
"text_plain_output_751.png",
"text_plain_output_539.png",
"text_plain_output_8.png",
"text_plain_output_122.png",
"text_plain_output_384.png",
"text_plain_output_498.png",
"text_plain_output_211.png",
"text_plain_output_662.png",
"text_plain_output_780.png",
"text_plain_output_182.png",
"text_plain_output_26.png",
"text_plain_output_601.png",
"text_plain_output_554.png",
"text_plain_output_536.png",
"text_plain_output_620.png",
"text_plain_output_406.png",
"text_plain_output_310.png",
"text_plain_output_760.png",
"text_plain_output_456.png",
"text_plain_output_541.png",
"text_plain_output_558.png",
"text_plain_output_668.png",
"text_plain_output_702.png",
"text_plain_output_724.png",
"text_plain_output_220.png",
"text_plain_output_749.png",
"text_plain_output_653.png",
"text_plain_output_543.png",
"text_plain_output_781.png",
"text_plain_output_451.png",
"text_plain_output_109.png",
"text_plain_output_459.png",
"text_plain_output_238.png",
"text_plain_output_520.png",
"text_plain_output_616.png",
"text_plain_output_615.png",
"text_plain_output_41.png",
"text_plain_output_34.png",
"text_plain_output_612.png",
"text_plain_output_253.png",
"text_plain_output_346.png",
"text_plain_output_723.png",
"text_plain_output_291.png",
"text_plain_output_168.png",
"text_plain_output_394.png",
"text_plain_output_204.png",
"text_plain_output_241.png",
"text_plain_output_231.png",
"text_plain_output_533.png",
"text_plain_output_345.png",
"text_plain_output_649.png",
"text_plain_output_350.png",
"text_plain_output_209.png",
"text_plain_output_185.png",
"text_plain_output_85.png",
"text_plain_output_765.png",
"text_plain_output_636.png",
"text_plain_output_42.png",
"text_plain_output_110.png",
"text_plain_output_605.png",
"text_plain_output_549.png",
"text_plain_output_67.png",
"text_plain_output_508.png",
"text_plain_output_573.png",
"text_plain_output_468.png",
"text_plain_output_370.png",
"text_plain_output_297.png",
"text_plain_output_53.png",
"text_plain_output_313.png",
"text_plain_output_224.png",
"text_plain_output_635.png",
"text_plain_output_703.png",
"text_plain_output_711.png",
"text_plain_output_193.png",
"text_plain_output_441.png",
"text_plain_output_403.png",
"text_plain_output_696.png",
"text_plain_output_23.png",
"text_plain_output_761.png",
"text_plain_output_610.png",
"text_plain_output_173.png",
"text_plain_output_683.png",
"text_plain_output_235.png",
"text_plain_output_151.png",
"text_plain_output_89.png",
"text_plain_output_299.png",
"text_plain_output_632.png",
"text_plain_output_51.png",
"text_plain_output_677.png",
"text_plain_output_626.png",
"text_plain_output_450.png",
"text_plain_output_252.png",
"text_plain_output_296.png",
"text_plain_output_525.png",
"text_plain_output_731.png",
"text_plain_output_672.png",
"text_plain_output_705.png",
"text_plain_output_28.png",
"text_plain_output_72.png",
"text_plain_output_99.png",
"text_plain_output_381.png",
"text_plain_output_571.png",
"text_plain_output_163.png",
"text_plain_output_179.png",
"text_plain_output_537.png",
"text_plain_output_162.png",
"text_plain_output_136.png",
"text_plain_output_602.png",
"text_plain_output_246.png",
"text_plain_output_2.png",
"text_plain_output_569.png",
"text_plain_output_772.png",
"text_plain_output_239.png",
"text_plain_output_127.png",
"text_plain_output_559.png",
"text_plain_output_311.png",
"text_plain_output_710.png",
"text_plain_output_500.png",
"text_plain_output_719.png",
"text_plain_output_295.png",
"text_plain_output_279.png",
"text_plain_output_507.png",
"text_plain_output_590.png",
"text_plain_output_509.png",
"text_plain_output_337.png",
"text_plain_output_562.png",
"text_plain_output_499.png",
"text_plain_output_196.png",
"text_plain_output_342.png",
"text_plain_output_563.png",
"text_plain_output_97.png",
"text_plain_output_729.png",
"text_plain_output_717.png",
"text_plain_output_227.png",
"text_plain_output_453.png",
"text_plain_output_1.png",
"text_plain_output_33.png",
"text_plain_output_650.png",
"text_plain_output_150.png",
"text_plain_output_631.png",
"text_plain_output_39.png",
"text_plain_output_752.png",
"text_plain_output_176.png",
"text_plain_output_584.png",
"text_plain_output_335.png",
"text_plain_output_186.png",
"text_plain_output_233.png",
"text_plain_output_228.png",
"text_plain_output_473.png",
"text_plain_output_385.png",
"text_plain_output_478.png",
"text_plain_output_762.png",
"text_plain_output_55.png",
"text_plain_output_412.png",
"text_plain_output_293.png",
"text_plain_output_268.png",
"text_plain_output_436.png",
"text_plain_output_199.png",
"text_plain_output_354.png",
"text_plain_output_463.png",
"text_plain_output_360.png",
"text_plain_output_319.png",
"text_plain_output_82.png",
"text_plain_output_356.png",
"text_plain_output_202.png",
"text_plain_output_93.png",
"text_plain_output_698.png",
"text_plain_output_336.png",
"text_plain_output_19.png",
"text_plain_output_439.png",
"text_plain_output_341.png",
"text_plain_output_105.png",
"text_plain_output_465.png",
"text_plain_output_80.png",
"text_plain_output_491.png",
"text_plain_output_679.png",
"text_plain_output_641.png",
"text_plain_output_94.png",
"text_plain_output_164.png",
"text_plain_output_249.png",
"text_plain_output_534.png",
"text_plain_output_444.png",
"text_plain_output_619.png",
"text_plain_output_216.png",
"text_plain_output_124.png",
"text_plain_output_17.png",
"text_plain_output_148.png",
"text_plain_output_323.png",
"text_plain_output_694.png",
"text_plain_output_402.png",
"text_plain_output_755.png",
"text_plain_output_722.png",
"text_plain_output_424.png",
"text_plain_output_486.png",
"text_plain_output_597.png",
"text_plain_output_250.png",
"text_plain_output_11.png",
"text_plain_output_481.png",
"text_plain_output_560.png",
"text_plain_output_526.png",
"text_plain_output_400.png",
"text_plain_output_524.png",
"text_plain_output_538.png",
"text_plain_output_12.png",
"text_plain_output_267.png",
"text_plain_output_553.png",
"text_plain_output_408.png",
"text_plain_output_425.png",
"text_plain_output_591.png",
"text_plain_output_428.png",
"text_plain_output_416.png",
"text_plain_output_625.png",
"text_plain_output_194.png",
"text_plain_output_577.png",
"text_plain_output_727.png",
"text_plain_output_747.png",
"text_plain_output_782.png",
"text_plain_output_519.png",
"text_plain_output_62.png",
"text_plain_output_733.png",
"text_plain_output_721.png",
"text_plain_output_480.png",
"text_plain_output_757.png",
"text_plain_output_303.png",
"text_plain_output_621.png",
"text_plain_output_377.png",
"text_plain_output_440.png",
"text_plain_output_95.png",
"text_plain_output_339.png",
"text_plain_output_458.png",
"text_plain_output_464.png",
"text_plain_output_156.png",
"text_plain_output_547.png",
"text_plain_output_298.png",
"text_plain_output_369.png",
"text_plain_output_348.png",
"text_plain_output_587.png",
"text_plain_output_448.png",
"text_plain_output_364.png",
"text_plain_output_365.png",
"text_plain_output_61.png",
"text_plain_output_585.png",
"text_plain_output_352.png",
"text_plain_output_83.png",
"text_plain_output_374.png",
"text_plain_output_647.png",
"text_plain_output_472.png",
"text_plain_output_566.png",
"text_plain_output_397.png",
"text_plain_output_600.png",
"text_plain_output_661.png",
"text_plain_output_389.png",
"text_plain_output_292.png",
"text_plain_output_351.png",
"text_plain_output_135.png",
"text_plain_output_285.png",
"text_plain_output_574.png",
"text_plain_output_582.png",
"text_plain_output_306.png",
"text_plain_output_675.png",
"text_plain_output_493.png",
"text_plain_output_46.png"
] | from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.callbacks import EarlyStopping
import tensorflow as tf
from tensorflow.keras.applications.inception_v3 import InceptionV3
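# Load an ImageNet-pretrained InceptionV3 without its classification head and freeze it, so only the new dense layers below are trained (transfer learning).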
pretrained_base = InceptionV3(weights='imagenet', include_top=False, input_shape=(150, 150, 3))
pretrained_base.trainable = False
from tensorflow.keras.callbacks import EarlyStopping
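# EarlyStopping halts training once the monitored metric stops improving by at least min_delta for `patience` epochs, then restores the best weights seen.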
early_stopping = EarlyStopping(min_delta=0.001, patience=8, restore_best_weights=True)
model = tf.keras.models.Sequential([pretrained_base, tf.keras.layers.Flatten(), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(5, activation='softmax')])
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics=['accuracy']) | code
50223292/cell_4 | [
"image_output_1.png"
] | import cv2
import matplotlib.pyplot as plt
import cv2
image = cv2.imread('../input/cassava-leaf-disease-classification/test_images/2216849948.jpg')
plt.figure(figsize=(20, 10))
plt.imshow(image)
plt.axis('off')
plt.show() | code |
50223292/cell_6 | [
"text_plain_output_1.png"
] | import cv2
import matplotlib.pyplot as plt
import os
num_test_images = len(os.listdir('../input/cassava-leaf-disease-classification/test_images'))
num_test_images
num_train_images = len(os.listdir('../input/cassava-leaf-disease-classification/train_images'))
num_train_images
import cv2
image = cv2.imread('../input/cassava-leaf-disease-classification/test_images/2216849948.jpg')
plt.axis('off')
plant = os.listdir('../input/cassava-leaf-disease-classification/train_images')
plant_dir = '../input/cassava-leaf-disease-classification/train_images'
plt.figure(figsize=(20, 10))
for i in range(9):
plt.subplot(3, 3, i + 1)
img = plt.imread(os.path.join(plant_dir, plant[i]))
plt.imshow(img)
plt.axis('off')
plt.tight_layout() | code |
50223292/cell_2 | [
"image_output_1.png"
] | import os
num_test_images = len(os.listdir('../input/cassava-leaf-disease-classification/test_images'))
num_test_images | code |
50223292/cell_11 | [
"text_plain_output_1.png"
] | from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
import pandas as pd
import tensorflow as tf
sub = pd.read_csv('../input/cassava-leaf-disease-classification/sample_submission.csv')
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
train['label'] = train['label'].astype('str')
from tensorflow.keras.applications.inception_v3 import InceptionV3
pretrained_base = InceptionV3(weights='imagenet', include_top=False, input_shape=(150, 150, 3))
pretrained_base.trainable = False
from tensorflow.keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(min_delta=0.001, patience=8, restore_best_weights=True)
model = tf.keras.models.Sequential([pretrained_base, tf.keras.layers.Flatten(), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(5, activation='softmax')])
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics=['accuracy'])
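# rescale maps pixels to [0, 1]; shear/zoom/flip add light augmentation. Note both generators draw from the same dataframe, so this "validation" set overlaps the training data and its score will be optimistic.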
train_datagen = ImageDataGenerator(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
train_data = train_datagen.flow_from_dataframe(dataframe=train, directory='../input/cassava-leaf-disease-classification/train_images', x_col='image_id', y_col='label', batch_size=16, target_size=(150, 150))
validation_data = train_datagen.flow_from_dataframe(dataframe=train, directory='../input/cassava-leaf-disease-classification/train_images', x_col='image_id', y_col='label', batch_size=16, target_size=(150, 150))
history = model.fit(train_data, epochs=10, validation_data=validation_data, callbacks=[early_stopping]) | code |
50223292/cell_8 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.inception_v3 import InceptionV3
pretrained_base = InceptionV3(weights='imagenet', include_top=False, input_shape=(150, 150, 3))
pretrained_base.trainable = False | code |
50223292/cell_3 | [
"text_html_output_1.png"
] | import os
num_test_images = len(os.listdir('../input/cassava-leaf-disease-classification/test_images'))
num_test_images
num_train_images = len(os.listdir('../input/cassava-leaf-disease-classification/train_images'))
num_train_images | code |
50223292/cell_10 | [
"text_plain_output_1.png"
] | from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
import pandas as pd
sub = pd.read_csv('../input/cassava-leaf-disease-classification/sample_submission.csv')
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
train['label'] = train['label'].astype('str')
train_datagen = ImageDataGenerator(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
train_data = train_datagen.flow_from_dataframe(dataframe=train, directory='../input/cassava-leaf-disease-classification/train_images', x_col='image_id', y_col='label', batch_size=16, target_size=(150, 150))
validation_data = train_datagen.flow_from_dataframe(dataframe=train, directory='../input/cassava-leaf-disease-classification/train_images', x_col='image_id', y_col='label', batch_size=16, target_size=(150, 150)) | code |
50223292/cell_5 | [
"text_plain_output_5.png",
"text_plain_output_9.png",
"text_plain_output_4.png",
"text_plain_output_13.png",
"text_plain_output_14.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"text_plain_output_3.png",
"text_plain_output_7.png",
"text_plain_output_8.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"text_plain_output_11.png",
"text_plain_output_12.png"
] | import pandas as pd
sub = pd.read_csv('../input/cassava-leaf-disease-classification/sample_submission.csv')
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
train['label'] = train['label'].astype('str')
train.head() | code |
73095834/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
df = pd.read_csv('G:\\MS Avishkara\\winequality-red.csv', sep=';')
df.head() | code |
16166947/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy import stats
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import statsmodels.api as sm
train_data = pd.read_csv('../input/train_V2.csv')
test_data = pd.read_csv('../input/test_V2.csv')
train_data.head(10)[['damageDealt', 'winPlacePerc']]
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='darkgrid')
data_to_plot = train_data.sample(n=200)[['damageDealt', 'winPlacePerc']]
data_to_plot = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})].sample(n=200)[['damageDealt', 'winPlacePerc']]
from scipy import stats
solo_train_data = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})]
import statsmodels.api as sm
sample = solo_train_data.sample(n=10)
sns.regplot(x='damageDealt', y='winPlacePerc', x_jitter=5, order=2, data=solo_train_data.sample(n=200)[['damageDealt', 'winPlacePerc']]) | code |
16166947/cell_13 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from scipy import stats
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/train_V2.csv')
test_data = pd.read_csv('../input/test_V2.csv')
train_data.head(10)[['damageDealt', 'winPlacePerc']]
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='darkgrid')
data_to_plot = train_data.sample(n=200)[['damageDealt', 'winPlacePerc']]
data_to_plot = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})].sample(n=200)[['damageDealt', 'winPlacePerc']]
from scipy import stats
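# Baseline check: R^2 (rvalue squared) on pure noise, then on damageDealt vs winPlacePerc at growing sample sizes, to see how the estimate stabilizes.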
print('pure random\n', stats.linregress(np.random.random(200), np.random.random(200)).rvalue ** 2)
solo_train_data = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})]
print('20 samples\n', stats.linregress(solo_train_data.sample(n=20)[['damageDealt', 'winPlacePerc']]).rvalue ** 2)
print('200 samples\n', stats.linregress(solo_train_data.sample(n=200)[['damageDealt', 'winPlacePerc']]).rvalue ** 2)
print('2000 samples\n', stats.linregress(solo_train_data.sample(n=2000)[['damageDealt', 'winPlacePerc']]).rvalue ** 2)
print('20000 samples\n', stats.linregress(solo_train_data.sample(n=20000)[['damageDealt', 'winPlacePerc']]).rvalue ** 2) | code |
16166947/cell_9 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/train_V2.csv')
test_data = pd.read_csv('../input/test_V2.csv')
train_data.head(10)[['damageDealt', 'winPlacePerc']]
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='darkgrid')
data_to_plot = train_data.sample(n=200)[['damageDealt', 'winPlacePerc']]
sns.relplot(x='damageDealt', y='winPlacePerc', data=data_to_plot) | code |
16166947/cell_25 | [
"text_plain_output_1.png"
] | from scipy import stats
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import statsmodels.api as sm
train_data = pd.read_csv('../input/train_V2.csv')
test_data = pd.read_csv('../input/test_V2.csv')
train_data.head(10)[['damageDealt', 'winPlacePerc']]
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='darkgrid')
data_to_plot = train_data.sample(n=200)[['damageDealt', 'winPlacePerc']]
data_to_plot = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})].sample(n=200)[['damageDealt', 'winPlacePerc']]
from scipy import stats
solo_train_data = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})]
import statsmodels.api as sm
sample = solo_train_data.sample(n=10)
from sklearn.model_selection import train_test_split
clean_train_data = train_data[['damageDealt', 'winPlacePerc']].dropna()
X_train, X_test, y_train, y_test = train_test_split(clean_train_data, clean_train_data.winPlacePerc, test_size=0.2)
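# sm.Logit(endog, exog) fits a logistic curve to the continuous winPlacePerc in [0, 1]; no constant column is added, so the fitted curve is pinned to 0.5 at damageDealt == 0.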
model = sm.Logit(y_train, X_train.damageDealt)
result = model.fit() | code |
16166947/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy import stats
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import statsmodels.api as sm
train_data = pd.read_csv('../input/train_V2.csv')
test_data = pd.read_csv('../input/test_V2.csv')
train_data.head(10)[['damageDealt', 'winPlacePerc']]
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='darkgrid')
data_to_plot = train_data.sample(n=200)[['damageDealt', 'winPlacePerc']]
data_to_plot = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})].sample(n=200)[['damageDealt', 'winPlacePerc']]
from scipy import stats
solo_train_data = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})]
import statsmodels.api as sm
sample = solo_train_data.sample(n=10)
sns.regplot(x='damageDealt', y='winPlacePerc', x_jitter=5, logistic=True, data=solo_train_data.sample(n=2000)[['damageDealt', 'winPlacePerc']]) | code |
16166947/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy import stats
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import statsmodels.api as sm
train_data = pd.read_csv('../input/train_V2.csv')
test_data = pd.read_csv('../input/test_V2.csv')
train_data.head(10)[['damageDealt', 'winPlacePerc']]
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='darkgrid')
data_to_plot = train_data.sample(n=200)[['damageDealt', 'winPlacePerc']]
data_to_plot = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})].sample(n=200)[['damageDealt', 'winPlacePerc']]
from scipy import stats
solo_train_data = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})]
import statsmodels.api as sm
sample = solo_train_data.sample(n=10)
from sklearn.model_selection import train_test_split
clean_train_data = train_data[['damageDealt', 'winPlacePerc']].dropna()
X_train, X_test, y_train, y_test = train_test_split(clean_train_data, clean_train_data.winPlacePerc, test_size=0.2)
model = sm.Logit(y_train, X_train.damageDealt)
result = model.fit()
from sklearn.metrics import mean_absolute_error
predictions = result.predict(X_test.damageDealt)
sns.scatterplot(y_test[:200], predictions[:200])
print(mean_absolute_error(y_test, np.random.random(len(y_test))))
print(mean_absolute_error(y_test, predictions)) | code |
16166947/cell_2 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
16166947/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/train_V2.csv')
test_data = pd.read_csv('../input/test_V2.csv')
train_data.head(10)[['damageDealt', 'winPlacePerc']]
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='darkgrid')
data_to_plot = train_data.sample(n=200)[['damageDealt', 'winPlacePerc']]
data_to_plot = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})].sample(n=200)[['damageDealt', 'winPlacePerc']]
sns.relplot(x='damageDealt', y='winPlacePerc', data=data_to_plot) | code |
16166947/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy import stats
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import statsmodels.api as sm
train_data = pd.read_csv('../input/train_V2.csv')
test_data = pd.read_csv('../input/test_V2.csv')
train_data.head(10)[['damageDealt', 'winPlacePerc']]
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='darkgrid')
data_to_plot = train_data.sample(n=200)[['damageDealt', 'winPlacePerc']]
data_to_plot = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})].sample(n=200)[['damageDealt', 'winPlacePerc']]
from scipy import stats
solo_train_data = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})]
import statsmodels.api as sm
sample = solo_train_data.sample(n=10)
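# lowess(endog, exog) returns the locally weighted regression estimate as an array of (sorted x, fitted y) pairs.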
print(sample[['winPlacePerc', 'damageDealt']])
print(sm.nonparametric.lowess(sample['winPlacePerc'], sample['damageDealt'])) | code |
16166947/cell_1 | [
"text_plain_output_1.png"
] | # https://stackoverflow.com/questions/56283294/importerror-cannot-import-name-factorial
!pip install statsmodels==0.10.0rc2 | code |
16166947/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/train_V2.csv')
test_data = pd.read_csv('../input/test_V2.csv')
train_data.head(10)[['damageDealt', 'winPlacePerc']] | code |
16166947/cell_28 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy import stats
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import statsmodels.api as sm
train_data = pd.read_csv('../input/train_V2.csv')
test_data = pd.read_csv('../input/test_V2.csv')
train_data.head(10)[['damageDealt', 'winPlacePerc']]
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='darkgrid')
data_to_plot = train_data.sample(n=200)[['damageDealt', 'winPlacePerc']]
data_to_plot = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})].sample(n=200)[['damageDealt', 'winPlacePerc']]
from scipy import stats
solo_train_data = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})]
import statsmodels.api as sm
sample = solo_train_data.sample(n=10)
from sklearn.model_selection import train_test_split
clean_train_data = train_data[['damageDealt', 'winPlacePerc']].dropna()
X_train, X_test, y_train, y_test = train_test_split(clean_train_data, clean_train_data.winPlacePerc, test_size=0.2)
model = sm.Logit(y_train, X_train.damageDealt)
result = model.fit()
from sklearn.metrics import mean_absolute_error
predictions = result.predict(X_test.damageDealt)
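# Predictions from the no-intercept logit sit in [0.5, 1] here, so (p - 0.5) * 2 rescales them onto [0, 1].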
garbled_predictions = (predictions - 0.5) * 2.0
print(mean_absolute_error(y_test, garbled_predictions))
sns.scatterplot(y_test[:200], garbled_predictions[:200]) | code |
16166947/cell_15 | [
"text_html_output_1.png"
] | from scipy import stats
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/train_V2.csv')
test_data = pd.read_csv('../input/test_V2.csv')
train_data.head(10)[['damageDealt', 'winPlacePerc']]
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='darkgrid')
data_to_plot = train_data.sample(n=200)[['damageDealt', 'winPlacePerc']]
data_to_plot = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})].sample(n=200)[['damageDealt', 'winPlacePerc']]
from scipy import stats
solo_train_data = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})]
sns.regplot(x='damageDealt', y='winPlacePerc', x_jitter=5, data=solo_train_data.sample(n=500)[['damageDealt', 'winPlacePerc']]) | code |
16166947/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy import stats
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/train_V2.csv')
test_data = pd.read_csv('../input/test_V2.csv')
train_data.head(10)[['damageDealt', 'winPlacePerc']]
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='darkgrid')
data_to_plot = train_data.sample(n=200)[['damageDealt', 'winPlacePerc']]
data_to_plot = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})].sample(n=200)[['damageDealt', 'winPlacePerc']]
from scipy import stats
solo_train_data = train_data[train_data.matchType.isin({'solo', 'solo-fpp'})]
sns.regplot(x='damageDealt', y='winPlacePerc', x_jitter=5, lowess=True, data=solo_train_data.sample(n=200)[['damageDealt', 'winPlacePerc']]) | code |
16166947/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/train_V2.csv')
test_data = pd.read_csv('../input/test_V2.csv')
train_data.info()
train_data.head() | code |
74041294/cell_23 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from mlxtend.frequent_patterns import fpgrowth, association_rules # MBA
from scipy import sparse
from zipfile import ZipFile
import numpy as np
import os
import pandas as pd
def preDot(text):
return text.rsplit('.', 1)[0]
np.random.seed(73)
pd.options.mode.chained_assignment = None
dataDict = {}
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
with ZipFile(os.path.join(dirname, filename), 'r') as zipf:
unzipped_fn = preDot(filename)
with zipf.open(unzipped_fn) as f:
dataDict[preDot(unzipped_fn)] = pd.read_csv(f)
train_orders = dataDict['orders'][dataDict['orders']['eval_set'] == 'train'].drop('eval_set', axis=1)
prior_orders = dataDict['orders'][dataDict['orders']['eval_set'] == 'prior'].drop('eval_set', axis=1)
test_orders = dataDict['orders'][dataDict['orders']['eval_set'] == 'test'].drop('eval_set', axis=1)
small_train = dataDict['order_products__train'][['order_id', 'product_id']]
small_train_split = (small_train[:461543], small_train[461543:461543 * 2 - 1], small_train[461543 * 2 - 1:])
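# Pivot in three chunks and keep each result sparse: one dense order x product pivot over the full frame would be far more memory-hungry.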
pivots = []
for df in small_train_split:
pvt = ~df.pivot(index='order_id', columns='product_id', values='product_id').isna()
pivots.append(pvt.astype(pd.SparseDtype(bool)))
del pvt
product_cols = sorted(small_train.product_id.unique())
for i in range(len(pivots)):
pivots[i] = pivots[i].reindex(columns=product_cols, fill_value=False).astype(pd.SparseDtype(bool))
pivots[i] = sparse.csr_matrix(pivots[i])
pivots = sparse.vstack(pivots)
truth_table = pd.DataFrame(pivots.todense(), index=small_train.order_id.unique(), columns=product_cols)
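# FP-Growth mines itemsets appearing in at least 5 baskets (min_support is a fraction of all orders); association_rules then keeps rules with confidence >= 0.8.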
frequent_itemsets = fpgrowth(truth_table, min_support=5 / len(truth_table), use_colnames=True)
rules = association_rules(frequent_itemsets, metric='confidence', min_threshold=0.8)
rules_ante_cons = rules[['antecedents', 'consequents']]
baskets = small_train.groupby('order_id')['product_id'].apply(frozenset)
baskets.name = 'basket'
recommendations = train_orders.join(baskets, on='order_id')
recommendations['recommendations'] = [frozenset() for _ in range(len(recommendations))]
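# frozenset's `<=` is a subset test: every basket containing a rule's full antecedent accumulates that rule's consequents.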
for idx, antecedent in enumerate(rules_ante_cons['antecedents']):
lookup = (antecedent <= recommendations.basket, 'recommendations')
recommendations.loc[lookup] = recommendations.loc[lookup].apply(frozenset.union, args=(rules_ante_cons.loc[idx, 'consequents'],))
recommendations.loc[:, 'recommendations'] = recommendations.recommendations - recommendations.basket
non_empty_recs = recommendations[recommendations.recommendations.apply(bool)]
print('1 out of approx.', round(1 / (len(non_empty_recs) / len(recommendations))), 'transactions will result in a recommendation being suggested to a customer.')
def map_products(codes):
if isinstance(codes, pd.Series):
return codes.apply(map_products)
return frozenset(map(products.get, codes))
products = dataDict['products']
products = products.set_index('product_id')['product_name'].to_dict()
non_empty_recs.loc[:, ['basket', 'recommendations']] = non_empty_recs[['basket', 'recommendations']].apply(map_products)
display(non_empty_recs) | code |
74041294/cell_26 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from mlxtend.frequent_patterns import fpgrowth, association_rules # MBA
from scipy import sparse
from zipfile import ZipFile
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
def preDot(text):
return text.rsplit('.', 1)[0]
np.random.seed(73)
pd.options.mode.chained_assignment = None
dataDict = {}
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
with ZipFile(os.path.join(dirname, filename), 'r') as zipf:
unzipped_fn = preDot(filename)
with zipf.open(unzipped_fn) as f:
dataDict[preDot(unzipped_fn)] = pd.read_csv(f)
train_orders = dataDict['orders'][dataDict['orders']['eval_set'] == 'train'].drop('eval_set', axis=1)
prior_orders = dataDict['orders'][dataDict['orders']['eval_set'] == 'prior'].drop('eval_set', axis=1)
test_orders = dataDict['orders'][dataDict['orders']['eval_set'] == 'test'].drop('eval_set', axis=1)
small_train = dataDict['order_products__train'][['order_id', 'product_id']]
small_train_split = (small_train[:461543], small_train[461543:461543 * 2 - 1], small_train[461543 * 2 - 1:])
pivots = []
for df in small_train_split:
pvt = ~df.pivot(index='order_id', columns='product_id', values='product_id').isna()
pivots.append(pvt.astype(pd.SparseDtype(bool)))
del pvt
product_cols = sorted(small_train.product_id.unique())
for i in range(len(pivots)):
pivots[i] = pivots[i].reindex(columns=product_cols, fill_value=False).astype(pd.SparseDtype(bool))
pivots[i] = sparse.csr_matrix(pivots[i])
pivots = sparse.vstack(pivots)
truth_table = pd.DataFrame(pivots.todense(), index=small_train.order_id.unique(), columns=product_cols)
frequent_itemsets = fpgrowth(truth_table, min_support=5 / len(truth_table), use_colnames=True)
rules = association_rules(frequent_itemsets, metric='confidence', min_threshold=0.8)
rules_ante_cons = rules[['antecedents', 'consequents']]
baskets = small_train.groupby('order_id')['product_id'].apply(frozenset)
baskets.name = 'basket'
recommendations = train_orders.join(baskets, on='order_id')
recommendations['recommendations'] = [frozenset() for _ in range(len(recommendations))]
for idx, antecedent in enumerate(rules_ante_cons['antecedents']):
lookup = (antecedent <= recommendations.basket, 'recommendations')
recommendations.loc[lookup] = recommendations.loc[lookup].apply(frozenset.union, args=(rules_ante_cons.loc[idx, 'consequents'],))
recommendations.loc[:, 'recommendations'] = recommendations.recommendations - recommendations.basket
non_empty_recs = recommendations[recommendations.recommendations.apply(bool)]
def map_products(codes):
if isinstance(codes, pd.Series):
return codes.apply(map_products)
return frozenset(map(products.get, codes))
products = dataDict['products']
products = products.set_index('product_id')['product_name'].to_dict()
non_empty_recs.loc[:, ['basket', 'recommendations']] = non_empty_recs[['basket', 'recommendations']].apply(map_products)
def mba_diagram(sample_basket, sample_recommendation):
import matplotlib.pyplot as plt
def get_text_box_coords(txt):
we = plt.Text.get_window_extent(txt, renderer=fig.canvas.get_renderer())
return ax.transAxes.inverted().transform(we)
def get_rightmost_vmid(box):
return box[1][0], (box[0][1] + box[1][1]) / 2
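    # The helpers map a Text's pixel bounding box into axes-fraction coordinates, so each box can be placed just to the right of the previous one.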
    fig, ax = plt.subplots(figsize=(20, 10))
    ax.set_title('An illustration of a recommendation system for a sample customer basket\n(basket ← suggestion)', fontsize=18)
    ax.axis('off')
    basket_txt = ax.text(0.05, 0.95, sample_basket, ha='left', va='top', wrap=True, size=12, bbox=dict(boxstyle='round,pad=1', fc='w', ec='lightblue'))
    basket_rightmost, basket_vmid = get_rightmost_vmid(get_text_box_coords(basket_txt))
    arrow_txt = ax.text(basket_rightmost * 1.4, basket_vmid, 'Add', ha='center', va='center', size=35, bbox=dict(boxstyle='larrow,pad=0.6', fc='lightgreen', ec='g', lw=2))
    arrow_rightmost, arrow_vmid = get_rightmost_vmid(get_text_box_coords(arrow_txt))
    recommendation_txt = ax.text(arrow_rightmost * 1.14, arrow_vmid, sample_recommendation, ha='left', va='top', wrap=True, fontsize=25, bbox=dict(boxstyle='round,pad=1', fc='w', ec='r'))
    recommendation_txt_pos = recommendation_txt.get_position()
    recommendation_txt.set_position((recommendation_txt_pos[0], recommendation_txt_pos[1] + (get_text_box_coords(recommendation_txt)[1][1] - get_text_box_coords(recommendation_txt)[0][1]) / 2))
sample_index = np.random.randint(len(non_empty_recs))
sample_basket = '\n'.join(non_empty_recs.iloc[sample_index].loc['basket'])
sample_recommendation = '\n'.join(non_empty_recs.iloc[sample_index].loc['recommendations'])
mba_diagram(sample_basket, sample_recommendation) | code |
74041294/cell_16 | [
"text_html_output_1.png"
] | from mlxtend.frequent_patterns import fpgrowth, association_rules # MBA
from scipy import sparse
from zipfile import ZipFile
import numpy as np
import os
import pandas as pd
def preDot(text):
return text.rsplit('.', 1)[0]
np.random.seed(73)
pd.options.mode.chained_assignment = None
dataDict = {}
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
with ZipFile(os.path.join(dirname, filename), 'r') as zipf:
unzipped_fn = preDot(filename)
with zipf.open(unzipped_fn) as f:
dataDict[preDot(unzipped_fn)] = pd.read_csv(f)
train_orders = dataDict['orders'][dataDict['orders']['eval_set'] == 'train'].drop('eval_set', axis=1)
prior_orders = dataDict['orders'][dataDict['orders']['eval_set'] == 'prior'].drop('eval_set', axis=1)
test_orders = dataDict['orders'][dataDict['orders']['eval_set'] == 'test'].drop('eval_set', axis=1)
small_train = dataDict['order_products__train'][['order_id', 'product_id']]
small_train_split = (small_train[:461543], small_train[461543:461543 * 2 - 1], small_train[461543 * 2 - 1:])
pivots = []
for df in small_train_split:
pvt = ~df.pivot(index='order_id', columns='product_id', values='product_id').isna()
pivots.append(pvt.astype(pd.SparseDtype(bool)))
del pvt
product_cols = sorted(small_train.product_id.unique())
for i in range(len(pivots)):
pivots[i] = pivots[i].reindex(columns=product_cols, fill_value=False).astype(pd.SparseDtype(bool))
pivots[i] = sparse.csr_matrix(pivots[i])
pivots = sparse.vstack(pivots)
truth_table = pd.DataFrame(pivots.todense(), index=small_train.order_id.unique(), columns=product_cols)
frequent_itemsets = fpgrowth(truth_table, min_support=5 / len(truth_table), use_colnames=True)
rules = association_rules(frequent_itemsets, metric='confidence', min_threshold=0.8)
print('μ number of consequents:', rules['consequents'].apply(len).mean())
rules | code |
74041294/cell_3 | [
"image_output_1.png"
] | from zipfile import ZipFile
import numpy as np
import os
import pandas as pd
def preDot(text):
return text.rsplit('.', 1)[0]
np.random.seed(73)
pd.options.mode.chained_assignment = None
dataDict = {}
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
with ZipFile(os.path.join(dirname, filename), 'r') as zipf:
unzipped_fn = preDot(filename)
with zipf.open(unzipped_fn) as f:
dataDict[preDot(unzipped_fn)] = pd.read_csv(f)
train_orders = dataDict['orders'][dataDict['orders']['eval_set'] == 'train'].drop('eval_set', axis=1)
prior_orders = dataDict['orders'][dataDict['orders']['eval_set'] == 'prior'].drop('eval_set', axis=1)
test_orders = dataDict['orders'][dataDict['orders']['eval_set'] == 'test'].drop('eval_set', axis=1) | code |
74041294/cell_12 | [
"text_plain_output_1.png"
] | from mlxtend.frequent_patterns import fpgrowth, association_rules # MBA
from scipy import sparse
from zipfile import ZipFile
import numpy as np
import os
import pandas as pd
def preDot(text):
return text.rsplit('.', 1)[0]
np.random.seed(73)
pd.options.mode.chained_assignment = None
dataDict = {}
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
with ZipFile(os.path.join(dirname, filename), 'r') as zipf:
unzipped_fn = preDot(filename)
with zipf.open(unzipped_fn) as f:
dataDict[preDot(unzipped_fn)] = pd.read_csv(f)
train_orders = dataDict['orders'][dataDict['orders']['eval_set'] == 'train'].drop('eval_set', axis=1)
prior_orders = dataDict['orders'][dataDict['orders']['eval_set'] == 'prior'].drop('eval_set', axis=1)
test_orders = dataDict['orders'][dataDict['orders']['eval_set'] == 'test'].drop('eval_set', axis=1)
small_train = dataDict['order_products__train'][['order_id', 'product_id']]
small_train_split = (small_train[:461543], small_train[461543:461543 * 2 - 1], small_train[461543 * 2 - 1:])
pivots = []
for df in small_train_split:
pvt = ~df.pivot(index='order_id', columns='product_id', values='product_id').isna()
pivots.append(pvt.astype(pd.SparseDtype(bool)))
del pvt
product_cols = sorted(small_train.product_id.unique())
for i in range(len(pivots)):
pivots[i] = pivots[i].reindex(columns=product_cols, fill_value=False).astype(pd.SparseDtype(bool))
pivots[i] = sparse.csr_matrix(pivots[i])
pivots = sparse.vstack(pivots)
truth_table = pd.DataFrame(pivots.todense(), index=small_train.order_id.unique(), columns=product_cols)
frequent_itemsets = fpgrowth(truth_table, min_support=5 / len(truth_table), use_colnames=True)
frequent_itemsets | code |
106206626/cell_21 | [
"text_html_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/juniper-networks-global-ai-challenge/training_dataset.csv')
test = pd.read_csv('../input/juniper-networks-global-ai-challenge/test_dataset.csv')
train.register__sales_dollar_amt_this_hour.plot.density(color='red')
y_train = train['register__sales_dollar_amt_this_hour']
train = train.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2', 'Unnamed: 0'], axis=1)
test = test.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2'], axis=1)
train['observation_timestamp'] = pd.to_datetime(train['observation_timestamp'])
test['observation_timestamp'] = pd.to_datetime(test['observation_timestamp'])
pd.to_datetime(train['observation_timestamp']).dt.month
import datetime as dt
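# pandas weekday: Monday=0 ... Sunday=6, so weekday < 5 flags Mon-Fri (weekend=0) and Sat/Sun get weekend=1.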
train['weekday'] = pd.to_datetime(train['observation_timestamp']).dt.weekday
train['weekend'] = np.where(train['weekday'] < 5, 0, 1)
test['weekday'] = pd.to_datetime(test['observation_timestamp']).dt.weekday
test['weekend'] = np.where(test['weekday'] < 5, 0, 1)
train['month'] = pd.to_datetime(train['observation_timestamp']).dt.month
test['month'] = pd.to_datetime(test['observation_timestamp']).dt.month
train = train.drop(['observation_id', 'observation_timestamp'], axis=1)
test = test.drop(['observation_id', 'observation_timestamp'], axis=1)
x_train = train.drop('register__sales_dollar_amt_this_hour', axis=1)
(x_train.shape, test.shape) | code |
106206626/cell_9 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/juniper-networks-global-ai-challenge/training_dataset.csv')
test = pd.read_csv('../input/juniper-networks-global-ai-challenge/test_dataset.csv')
train.register__sales_dollar_amt_this_hour.plot.density(color='red')
y_train = train['register__sales_dollar_amt_this_hour']
train = train.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2', 'Unnamed: 0'], axis=1)
test = test.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2'], axis=1)
train['observation_timestamp'] = pd.to_datetime(train['observation_timestamp'])
test['observation_timestamp'] = pd.to_datetime(test['observation_timestamp'])
pd.to_datetime(train['observation_timestamp']).dt.month | code |
106206626/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/juniper-networks-global-ai-challenge/training_dataset.csv')
test = pd.read_csv('../input/juniper-networks-global-ai-challenge/test_dataset.csv')
train.register__sales_dollar_amt_this_hour.plot.density(color='red')
print(train['register__sales_dollar_amt_this_hour'].skew()) | code |
106206626/cell_23 | [
"text_html_output_1.png",
"image_output_1.png"
] | from flaml import AutoML
from flaml import AutoML
import numpy as np
import pandas as pd
train = pd.read_csv('../input/juniper-networks-global-ai-challenge/training_dataset.csv')
test = pd.read_csv('../input/juniper-networks-global-ai-challenge/test_dataset.csv')
train.register__sales_dollar_amt_this_hour.plot.density(color='red')
y_train = train['register__sales_dollar_amt_this_hour']
train = train.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2', 'Unnamed: 0'], axis=1)
test = test.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2'], axis=1)
train['observation_timestamp'] = pd.to_datetime(train['observation_timestamp'])
test['observation_timestamp'] = pd.to_datetime(test['observation_timestamp'])
pd.to_datetime(train['observation_timestamp']).dt.month
import datetime as dt
train['weekday'] = pd.to_datetime(train['observation_timestamp']).dt.weekday
train['weekend'] = np.where(train['weekday'] < 5, 0, 1)
test['weekday'] = pd.to_datetime(test['observation_timestamp']).dt.weekday
test['weekend'] = np.where(test['weekday'] < 5, 0, 1)
train['month'] = pd.to_datetime(train['observation_timestamp']).dt.month
test['month'] = pd.to_datetime(test['observation_timestamp']).dt.month
train = train.drop(['observation_id', 'observation_timestamp'], axis=1)
test = test.drop(['observation_id', 'observation_timestamp'], axis=1)
x_train = train.drop('register__sales_dollar_amt_this_hour', axis=1)
(x_train.shape, test.shape)
from flaml import AutoML
automl = AutoML()
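# FLAML tunes LightGBM hyperparameters for up to time_budget seconds, selecting by cross-validated R^2 over n_splits folds.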
automl.fit(x_train, y_train, task='regression', metric='r2', time_budget=180, n_splits=5, ensemble=False, estimator_list=['lgbm']) | code |
106206626/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/juniper-networks-global-ai-challenge/training_dataset.csv')
test = pd.read_csv('../input/juniper-networks-global-ai-challenge/test_dataset.csv')
train.register__sales_dollar_amt_this_hour.plot.density(color='red')
y_train = train['register__sales_dollar_amt_this_hour']
train = train.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2', 'Unnamed: 0'], axis=1)
test = test.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2'], axis=1)
print(train.shape, test.shape)
train.info() | code |
106206626/cell_19 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/juniper-networks-global-ai-challenge/training_dataset.csv')
test = pd.read_csv('../input/juniper-networks-global-ai-challenge/test_dataset.csv')
train.register__sales_dollar_amt_this_hour.plot.density(color='red')
y_train = train['register__sales_dollar_amt_this_hour']
train = train.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2', 'Unnamed: 0'], axis=1)
test = test.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2'], axis=1)
train['observation_timestamp'] = pd.to_datetime(train['observation_timestamp'])
test['observation_timestamp'] = pd.to_datetime(test['observation_timestamp'])
pd.to_datetime(train['observation_timestamp']).dt.month
import datetime as dt
train['weekday'] = pd.to_datetime(train['observation_timestamp']).dt.weekday
train['weekend'] = np.where(train['weekday'] < 5, 0, 1)
test['weekday'] = pd.to_datetime(test['observation_timestamp']).dt.weekday
test['weekend'] = np.where(test['weekday'] < 5, 0, 1)
train['month'] = pd.to_datetime(train['observation_timestamp']).dt.month
test['month'] = pd.to_datetime(test['observation_timestamp']).dt.month
train = train.drop(['observation_id', 'observation_timestamp'], axis=1)
test = test.drop(['observation_id', 'observation_timestamp'], axis=1)
x_train = train.drop('register__sales_dollar_amt_this_hour', axis=1)
x_train.info() | code |
106206626/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib import colors
import xgboost as xgb
import os
import sys
from datetime import datetime, timedelta
from time import time
from uuid import uuid4
from scipy.ndimage import convolve1d
from sklearn.metrics import make_scorer, r2_score
from sklearn.model_selection import cross_validate
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning)
!pip install flaml
from flaml import AutoML | code |
106206626/cell_18 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
import seaborn as sns
train = pd.read_csv('../input/juniper-networks-global-ai-challenge/training_dataset.csv')
test = pd.read_csv('../input/juniper-networks-global-ai-challenge/test_dataset.csv')
train.register__sales_dollar_amt_this_hour.plot.density(color='red')
y_train = train['register__sales_dollar_amt_this_hour']
train = train.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2', 'Unnamed: 0'], axis=1)
test = test.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2'], axis=1)
train['observation_timestamp'] = pd.to_datetime(train['observation_timestamp'])
test['observation_timestamp'] = pd.to_datetime(test['observation_timestamp'])
pd.to_datetime(train['observation_timestamp']).dt.month
import datetime as dt
train['weekday'] = pd.to_datetime(train['observation_timestamp']).dt.weekday
train['weekend'] = np.where(train['weekday'] < 5, 0, 1)
test['weekday'] = pd.to_datetime(test['observation_timestamp']).dt.weekday
test['weekend'] = np.where(test['weekday'] < 5, 0, 1)
train['month'] = pd.to_datetime(train['observation_timestamp']).dt.month
test['month'] = pd.to_datetime(test['observation_timestamp']).dt.month
train = train.drop(['observation_id', 'observation_timestamp'], axis=1)
test = test.drop(['observation_id', 'observation_timestamp'], axis=1)
import seaborn as sns
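# Mean of the target per category level, sorted descending: a quick look at which levels drive hourly sales.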
def target_mean(data, feature, target, decimal=2):
return data.groupby(feature).agg({target: 'mean'}).sort_values(by=[target], ascending=[False]).round(decimal)
target_mean(train, 'store__type_code', 'register__sales_dollar_amt_this_hour') | code |
106206626/cell_16 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
import seaborn as sns
train = pd.read_csv('../input/juniper-networks-global-ai-challenge/training_dataset.csv')
test = pd.read_csv('../input/juniper-networks-global-ai-challenge/test_dataset.csv')
train.register__sales_dollar_amt_this_hour.plot.density(color='red')
y_train = train['register__sales_dollar_amt_this_hour']
train = train.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2', 'Unnamed: 0'], axis=1)
test = test.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2'], axis=1)
train['observation_timestamp'] = pd.to_datetime(train['observation_timestamp'])
test['observation_timestamp'] = pd.to_datetime(test['observation_timestamp'])
pd.to_datetime(train['observation_timestamp']).dt.month
import datetime as dt
train['weekday'] = pd.to_datetime(train['observation_timestamp']).dt.weekday
train['weekend'] = np.where(train['weekday'] < 5, 0, 1)
test['weekday'] = pd.to_datetime(test['observation_timestamp']).dt.weekday
test['weekend'] = np.where(test['weekday'] < 5, 0, 1)
train['month'] = pd.to_datetime(train['observation_timestamp']).dt.month
test['month'] = pd.to_datetime(test['observation_timestamp']).dt.month
train = train.drop(['observation_id', 'observation_timestamp'], axis=1)
test = test.drop(['observation_id', 'observation_timestamp'], axis=1)
import seaborn as sns
def target_mean(data, feature, target, decimal=2):
return data.groupby(feature).agg({target: 'mean'}).sort_values(by=[target], ascending=[False]).round(decimal)
import seaborn as sns
target_mean(train, 'hour_of_day', 'register__sales_dollar_amt_this_hour') | code |
106206626/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/juniper-networks-global-ai-challenge/training_dataset.csv')
test = pd.read_csv('../input/juniper-networks-global-ai-challenge/test_dataset.csv')
train.register__sales_dollar_amt_this_hour.plot.density(color='red') | code |
106206626/cell_17 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
import seaborn as sns
train = pd.read_csv('../input/juniper-networks-global-ai-challenge/training_dataset.csv')
test = pd.read_csv('../input/juniper-networks-global-ai-challenge/test_dataset.csv')
train.register__sales_dollar_amt_this_hour.plot.density(color='red')
y_train = train['register__sales_dollar_amt_this_hour']
train = train.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2', 'Unnamed: 0'], axis=1)
test = test.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2'], axis=1)
train['observation_timestamp'] = pd.to_datetime(train['observation_timestamp'])
test['observation_timestamp'] = pd.to_datetime(test['observation_timestamp'])
pd.to_datetime(train['observation_timestamp']).dt.month
import datetime as dt
train['weekday'] = pd.to_datetime(train['observation_timestamp']).dt.weekday
train['weekend'] = np.where(train['weekday'] < 5, 0, 1)
test['weekday'] = pd.to_datetime(test['observation_timestamp']).dt.weekday
test['weekend'] = np.where(test['weekday'] < 5, 0, 1)
train['month'] = pd.to_datetime(train['observation_timestamp']).dt.month
test['month'] = pd.to_datetime(test['observation_timestamp']).dt.month
train = train.drop(['observation_id', 'observation_timestamp'], axis=1)
test = test.drop(['observation_id', 'observation_timestamp'], axis=1)
import seaborn as sns
def target_mean(data, feature, target, decimal=2):
return data.groupby(feature).agg({target: 'mean'}).sort_values(by=[target], ascending=[False]).round(decimal)
target_mean(train, 'register__payment_types_accepted', 'register__sales_dollar_amt_this_hour') | code |
106206626/cell_24 | [
"text_html_output_1.png",
"image_output_1.png"
] | from flaml import AutoML
from flaml import AutoML
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import pandas as pd
train = pd.read_csv('../input/juniper-networks-global-ai-challenge/training_dataset.csv')
test = pd.read_csv('../input/juniper-networks-global-ai-challenge/test_dataset.csv')
train.register__sales_dollar_amt_this_hour.plot.density(color='red')
y_train = train['register__sales_dollar_amt_this_hour']
train = train.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2', 'Unnamed: 0'], axis=1)
test = test.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2'], axis=1)
train['observation_timestamp'] = pd.to_datetime(train['observation_timestamp'])
test['observation_timestamp'] = pd.to_datetime(test['observation_timestamp'])
pd.to_datetime(train['observation_timestamp']).dt.month
import datetime as dt
train['weekday'] = pd.to_datetime(train['observation_timestamp']).dt.weekday
train['weekend'] = np.where(train['weekday'] < 5, 0, 1)
test['weekday'] = pd.to_datetime(test['observation_timestamp']).dt.weekday
test['weekend'] = np.where(test['weekday'] < 5, 0, 1)
train['month'] = pd.to_datetime(train['observation_timestamp']).dt.month
test['month'] = pd.to_datetime(test['observation_timestamp']).dt.month
train = train.drop(['observation_id', 'observation_timestamp'], axis=1)
test = test.drop(['observation_id', 'observation_timestamp'], axis=1)
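# Binary-encode payment types: 1 if the register accepts exactly 'Cash+Credit+Check', 0 otherwise.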
train['register__payment_types_accepted'] = np.where(train['register__payment_types_accepted'] != 'Cash+Credit+Check', 0, 1)
test['register__payment_types_accepted'] = np.where(test['register__payment_types_accepted'] != 'Cash+Credit+Check', 0, 1)
x_train = train.drop('register__sales_dollar_amt_this_hour', axis=1)
(x_train.shape, test.shape)
from flaml import AutoML
automl = AutoML()
automl.fit(x_train, y_train, task='regression', metric='r2', time_budget=180, n_splits=5, ensemble=False, estimator_list=['lgbm'])
print('Best r2 on validation data: {0:.4g}'.format(1 - automl.best_loss))
import numpy as np
feature_importance = automl.feature_importances_
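# argsort is ascending, so the last 25 indices are the 25 most important features, plotted bottom-up.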
sorted_idx = np.argsort(feature_importance)[-25:]
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12, 6))
plt.barh(range(len(sorted_idx)), feature_importance[sorted_idx], align='center')
plt.yticks(range(len(sorted_idx)), np.array(x_train.columns)[sorted_idx])
plt.title('Feature Importance') | code |
106206626/cell_10 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/juniper-networks-global-ai-challenge/training_dataset.csv')
test = pd.read_csv('../input/juniper-networks-global-ai-challenge/test_dataset.csv')
train.register__sales_dollar_amt_this_hour.plot.density(color='red')
y_train = train['register__sales_dollar_amt_this_hour']
train = train.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2', 'Unnamed: 0'], axis=1)
test = test.drop(['region__peak_sales_dollar_amt_per_hour_v2', 'region__peak_returns_dollar_amt_per_hour_v2'], axis=1)
train['observation_timestamp'] = pd.to_datetime(train['observation_timestamp'])
test['observation_timestamp'] = pd.to_datetime(test['observation_timestamp'])
pd.to_datetime(train['observation_timestamp']).dt.month
import datetime as dt
train['weekday'] = pd.to_datetime(train['observation_timestamp']).dt.weekday
train['weekend'] = np.where(train['weekday'] < 5, 0, 1)
test['weekday'] = pd.to_datetime(test['observation_timestamp']).dt.weekday
test['weekend'] = np.where(test['weekday'] < 5, 0, 1)
train['month'] = pd.to_datetime(train['observation_timestamp']).dt.month
test['month'] = pd.to_datetime(test['observation_timestamp']).dt.month
train = train.drop(['observation_id', 'observation_timestamp'], axis=1)
test = test.drop(['observation_id', 'observation_timestamp'], axis=1)
print(train.shape, test.shape) | code |
17131450/cell_4 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
structures = pd.read_csv('../input/structures.csv')
structures = structures.head(n=100)
structures.head(n=10) | code |
17131450/cell_2 | [
"text_html_output_1.png"
] | import os
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.metrics import pairwise_distances
from sklearn.neighbors import NearestNeighbors
import os
import warnings
print(os.listdir('../input')) | code |
17131450/cell_8 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import warnings
warnings.filterwarnings('ignore')
structures['nearestn'] = structures.groupby('molecule_name')['x'].transform(nn_features)
structures.head(n=10) | code |
17131450/cell_10 | [
"text_plain_output_1.png"
] | # nearestn packs per-atom features: x[0:4] neighbor indices, x[4:8] their distances, x[8:20] neighbor x/y/z coordinates (turned into deltas below), x[20:22] a 2-D PCA embedding
structures['nn_1'] = structures['nearestn'].apply(lambda x: x[0])
structures['nn_2'] = structures['nearestn'].apply(lambda x: x[1])
structures['nn_3'] = structures['nearestn'].apply(lambda x: x[2])
structures['nn_4'] = structures['nearestn'].apply(lambda x: x[3])
structures['nn_1_dist'] = structures['nearestn'].apply(lambda x: x[4])
structures['nn_2_dist'] = structures['nearestn'].apply(lambda x: x[5])
structures['nn_3_dist'] = structures['nearestn'].apply(lambda x: x[6])
structures['nn_4_dist'] = structures['nearestn'].apply(lambda x: x[7])
structures['nn_dx_1'] = structures['nearestn'].apply(lambda x: x[8]) - structures['x']
structures['nn_dx_2'] = structures['nearestn'].apply(lambda x: x[9]) - structures['x']
structures['nn_dx_3'] = structures['nearestn'].apply(lambda x: x[10]) - structures['x']
structures['nn_dx_4'] = structures['nearestn'].apply(lambda x: x[11]) - structures['x']
structures['nn_dy_1'] = structures['nearestn'].apply(lambda x: x[12]) - structures['y']
structures['nn_dy_2'] = structures['nearestn'].apply(lambda x: x[13]) - structures['y']
structures['nn_dy_3'] = structures['nearestn'].apply(lambda x: x[14]) - structures['y']
structures['nn_dy_4'] = structures['nearestn'].apply(lambda x: x[15]) - structures['y']
structures['nn_dz_1'] = structures['nearestn'].apply(lambda x: x[16]) - structures['z']
structures['nn_dz_2'] = structures['nearestn'].apply(lambda x: x[17]) - structures['z']
structures['nn_dz_3'] = structures['nearestn'].apply(lambda x: x[18]) - structures['z']
structures['nn_dz_4'] = structures['nearestn'].apply(lambda x: x[19]) - structures['z']
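# the last two entries are 2-D PCA coordinates of the atom's local environment (PCA is imported in an earlier cell)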
structures['pca_x'] = structures['nearestn'].apply(lambda x: x[20])
structures['pca_y'] = structures['nearestn'].apply(lambda x: x[21])
structures = structures.drop(columns='nearestn')
structures.head(n=10) | code |
16154605/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import torch.nn as nn
import torch.nn as nn
import torchvision.models as models
model_v = models.resnet18()
model_c = models.resnet18()
model_c.fc = nn.Linear(512, 10, bias=True)
model_v.fc = nn.Linear(512, 10, bias=True)
print(model_c) | code |
16154605/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from torch.utils.data import DataLoader, Dataset, random_split
import os
import os
import torch
import torch
import torchvision.transforms as transforms
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import os
import os
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torchvision
from torch.utils.data import DataLoader, Dataset, random_split
import torchvision.transforms as transforms
class VowelConsonantDataset(Dataset):
def __init__(self, file_path, train=True, transform=None):
self.transform = transform
self.file_path = file_path
self.train = train
self.file_names = [file for _, _, files in os.walk(self.file_path) for file in files]
self.len = len(self.file_names)
if self.train:
self.classes_mapping = self.get_classes()
def __len__(self):
return len(self.file_names)
def __getitem__(self, index):
file_name = self.file_names[index]
image_data = self.pil_loader(self.file_path + '/' + file_name)
if self.transform:
image_data = self.transform(image_data)
if self.train:
file_name_splitted = file_name.split('_')
Y1 = self.classes_mapping[file_name_splitted[0]]
Y2 = self.classes_mapping[file_name_splitted[1]]
z1, z2 = (torch.zeros(10), torch.zeros(10))
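            # the sorted class mapping places the 10 consonant labels at 0-9 and the vowel labels at 10-19, hence the -10 shift below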
z1[Y1 - 10], z2[Y2] = (1, 1)
label = torch.stack([z1, z2])
return (image_data, label)
else:
return (image_data, file_name)
def pil_loader(self, path):
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def get_classes(self):
classes = []
for name in self.file_names:
name_splitted = name.split('_')
classes.extend([name_splitted[0], name_splitted[1]])
classes = list(set(classes))
classes_mapping = {}
for i, cl in enumerate(sorted(classes)):
classes_mapping[cl] = i
return classes_mapping
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import matplotlib.pyplot as plt
from torchvision import datasets
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
train_on_gpu = torch.cuda.is_available()
transform = transforms.Compose([transforms.ToTensor()])
full_data = VowelConsonantDataset('../input/train/train', train=True, transform=transform)
train_size = int(0.9 * len(full_data))
test_size = len(full_data) - train_size
train_data, validation_data = random_split(full_data, [train_size, test_size])
train_loader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=60, shuffle=True)
full_data.get_classes() | code |
16154605/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from torch.utils.data import DataLoader, Dataset, random_split
import os
import os
import torch
import torch
import torchvision.transforms as transforms
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import os
import os
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torchvision
from torch.utils.data import DataLoader, Dataset, random_split
import torchvision.transforms as transforms
class VowelConsonantDataset(Dataset):
def __init__(self, file_path, train=True, transform=None):
self.transform = transform
self.file_path = file_path
self.train = train
self.file_names = [file for _, _, files in os.walk(self.file_path) for file in files]
self.len = len(self.file_names)
if self.train:
self.classes_mapping = self.get_classes()
def __len__(self):
return len(self.file_names)
def __getitem__(self, index):
file_name = self.file_names[index]
image_data = self.pil_loader(self.file_path + '/' + file_name)
if self.transform:
image_data = self.transform(image_data)
if self.train:
file_name_splitted = file_name.split('_')
Y1 = self.classes_mapping[file_name_splitted[0]]
Y2 = self.classes_mapping[file_name_splitted[1]]
z1, z2 = (torch.zeros(10), torch.zeros(10))
z1[Y1 - 10], z2[Y2] = (1, 1)
label = torch.stack([z1, z2])
return (image_data, label)
else:
return (image_data, file_name)
def pil_loader(self, path):
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def get_classes(self):
classes = []
for name in self.file_names:
name_splitted = name.split('_')
classes.extend([name_splitted[0], name_splitted[1]])
classes = list(set(classes))
classes_mapping = {}
for i, cl in enumerate(sorted(classes)):
classes_mapping[cl] = i
return classes_mapping
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import matplotlib.pyplot as plt
from torchvision import datasets
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
train_on_gpu = torch.cuda.is_available()
transform = transforms.Compose([transforms.ToTensor()])
full_data = VowelConsonantDataset('../input/train/train', train=True, transform=transform)
train_size = int(0.9 * len(full_data))
test_size = len(full_data) - train_size
train_data, validation_data = random_split(full_data, [train_size, test_size])
train_loader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=60, shuffle=True)
len(train_loader) | code |
16154605/cell_19 | [
"text_plain_output_5.png",
"text_plain_output_15.png",
"text_plain_output_9.png",
"text_plain_output_20.png",
"text_plain_output_4.png",
"text_plain_output_13.png",
"text_plain_output_14.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"text_plain_output_18.png",
"text_plain_output_3.png",
"text_plain_output_7.png",
"text_plain_output_16.png",
"text_plain_output_8.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"text_plain_output_19.png",
"text_plain_output_17.png",
"text_plain_output_11.png",
"text_plain_output_12.png"
] | from PIL import Image
from torch.utils.data import DataLoader, Dataset, random_split
from tqdm import tqdm_notebook
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import os
import os
import torch
import torch
import torch.nn as nn
import torch.nn as nn
import torch.optim as optim
import torch.optim as optim
import torchvision.models as models
import torchvision.transforms as transforms
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import os
import os
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torchvision
from torch.utils.data import DataLoader, Dataset, random_split
import torchvision.transforms as transforms
class VowelConsonantDataset(Dataset):
def __init__(self, file_path, train=True, transform=None):
self.transform = transform
self.file_path = file_path
self.train = train
self.file_names = [file for _, _, files in os.walk(self.file_path) for file in files]
self.len = len(self.file_names)
if self.train:
self.classes_mapping = self.get_classes()
def __len__(self):
return len(self.file_names)
def __getitem__(self, index):
file_name = self.file_names[index]
image_data = self.pil_loader(self.file_path + '/' + file_name)
if self.transform:
image_data = self.transform(image_data)
if self.train:
file_name_splitted = file_name.split('_')
Y1 = self.classes_mapping[file_name_splitted[0]]
Y2 = self.classes_mapping[file_name_splitted[1]]
z1, z2 = (torch.zeros(10), torch.zeros(10))
z1[Y1 - 10], z2[Y2] = (1, 1)
label = torch.stack([z1, z2])
return (image_data, label)
else:
return (image_data, file_name)
def pil_loader(self, path):
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def get_classes(self):
classes = []
for name in self.file_names:
name_splitted = name.split('_')
classes.extend([name_splitted[0], name_splitted[1]])
classes = list(set(classes))
classes_mapping = {}
for i, cl in enumerate(sorted(classes)):
classes_mapping[cl] = i
return classes_mapping
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import matplotlib.pyplot as plt
from torchvision import datasets
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
train_on_gpu = torch.cuda.is_available()
transform = transforms.Compose([transforms.ToTensor()])
full_data = VowelConsonantDataset('../input/train/train', train=True, transform=transform)
train_size = int(0.9 * len(full_data))
test_size = len(full_data) - train_size
train_data, validation_data = random_split(full_data, [train_size, test_size])
train_loader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=60, shuffle=True)
test_data = VowelConsonantDataset('../input/test/test', train=False, transform=transform)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=60, shuffle=False)
for data in train_loader:
img, lab = data
im = np.transpose(img[0].numpy(), (1, 2, 0))
im = np.squeeze(im)
break
model_v = models.resnet18()
model_c = models.resnet18()
model_c.fc = nn.Linear(512, 10, bias=True)
model_v.fc = nn.Linear(512, 10, bias=True)
opt_v = optim.Adam(model_v.parameters())
opt_c = optim.Adam(model_c.parameters())
loss_fn_v = nn.CrossEntropyLoss()
loss_fn_c = nn.CrossEntropyLoss()
max_epochs = 20
loss_arr = []
st = 'cuda:0'
model_v.to(st)
model_c.to(st)
for i in tqdm_notebook(range(max_epochs), total=max_epochs, unit='epochs'):
for data in tqdm_notebook(train_loader, total=len(train_loader), unit='batch'):
img, lab = data
img, lab = (img.to(st), lab.to(st))
out_v = model_v(img)
out_c = model_c(img)
opt_v.zero_grad()
opt_c.zero_grad()
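        # recover integer class indices from the one-hot labels: slice 0 holds the vowel target, slice 1 the consonant target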
val, ind = torch.max(lab[:, 0, :], 1)
val, ind1 = torch.max(lab[:, 1, :], 1)
lab_v = ind
lab_c = ind1
loss = loss_fn_v(out_v, lab_v) + loss_fn_c(out_c, lab_c)
loss.backward()
opt_v.step()
opt_c.step()
        # no `del img, lab` here: `lab` is inspected right after the loop, and both are rebound each iteration
        loss_arr.append(loss.item())
torch.max(lab[:, 0, :]).int()
def evaluation(dataloader, model_v, model_c):
    total = 0
    v = 0
    c = 0
    for data in tqdm_notebook(dataloader, total=len(dataloader), unit='batch'):
        img, lab = data
        img, lab = (img.to(st), lab.to(st))
        _, out_v = torch.max(model_v(img), 1)
        _, out_c = torch.max(model_c(img), 1)
        _, lab1 = torch.max(lab[:, 0, :], 1)
        _, lab2 = torch.max(lab[:, 1, :], 1)
        total += lab.size(0)
        v += (out_v == lab1).sum().item()
        c += (out_c == lab2).sum().item()
        print(total, v, c)
        print(v / total, c / total)
evaluation(train_loader, model_v, model_c) | code
16154605/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
16154605/cell_18 | [
"text_plain_output_1.png"
] | from PIL import Image
from torch.utils.data import DataLoader, Dataset, random_split
from tqdm import tqdm_notebook
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import os
import os
import torch
import torch
import torch.nn as nn
import torch.nn as nn
import torch.optim as optim
import torch.optim as optim
import torchvision.models as models
import torchvision.transforms as transforms
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import os
import os
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torchvision
from torch.utils.data import DataLoader, Dataset, random_split
import torchvision.transforms as transforms
class VowelConsonantDataset(Dataset):
def __init__(self, file_path, train=True, transform=None):
self.transform = transform
self.file_path = file_path
self.train = train
self.file_names = [file for _, _, files in os.walk(self.file_path) for file in files]
self.len = len(self.file_names)
if self.train:
self.classes_mapping = self.get_classes()
def __len__(self):
return len(self.file_names)
def __getitem__(self, index):
file_name = self.file_names[index]
image_data = self.pil_loader(self.file_path + '/' + file_name)
if self.transform:
image_data = self.transform(image_data)
if self.train:
file_name_splitted = file_name.split('_')
Y1 = self.classes_mapping[file_name_splitted[0]]
Y2 = self.classes_mapping[file_name_splitted[1]]
z1, z2 = (torch.zeros(10), torch.zeros(10))
z1[Y1 - 10], z2[Y2] = (1, 1)
label = torch.stack([z1, z2])
return (image_data, label)
else:
return (image_data, file_name)
def pil_loader(self, path):
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def get_classes(self):
classes = []
for name in self.file_names:
name_splitted = name.split('_')
classes.extend([name_splitted[0], name_splitted[1]])
classes = list(set(classes))
classes_mapping = {}
for i, cl in enumerate(sorted(classes)):
classes_mapping[cl] = i
return classes_mapping
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import matplotlib.pyplot as plt
from torchvision import datasets
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
train_on_gpu = torch.cuda.is_available()
transform = transforms.Compose([transforms.ToTensor()])
full_data = VowelConsonantDataset('../input/train/train', train=True, transform=transform)
train_size = int(0.9 * len(full_data))
test_size = len(full_data) - train_size
train_data, validation_data = random_split(full_data, [train_size, test_size])
train_loader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=60, shuffle=True)
test_data = VowelConsonantDataset('../input/test/test', train=False, transform=transform)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=60, shuffle=False)
for data in train_loader:
img, lab = data
im = np.transpose(img[0].numpy(), (1, 2, 0))
im = np.squeeze(im)
break
model_v = models.resnet18()
model_c = models.resnet18()
model_c.fc = nn.Linear(512, 10, bias=True)
model_v.fc = nn.Linear(512, 10, bias=True)
opt_v = optim.Adam(model_v.parameters())
opt_c = optim.Adam(model_c.parameters())
loss_fn_v = nn.CrossEntropyLoss()
loss_fn_c = nn.CrossEntropyLoss()
max_epochs = 20
loss_arr = []
st = 'cuda:0'
model_v.to(st)
model_c.to(st)
for i in tqdm_notebook(range(max_epochs), total=max_epochs, unit='epochs'):
for data in tqdm_notebook(train_loader, total=len(train_loader), unit='batch'):
img, lab = data
img, lab = (img.to(st), lab.to(st))
out_v = model_v(img)
out_c = model_c(img)
opt_v.zero_grad()
opt_c.zero_grad()
val, ind = torch.max(lab[:, 0, :], 1)
val, ind1 = torch.max(lab[:, 1, :], 1)
lab_v = ind
lab_c = ind1
loss = loss_fn_v(out_v, lab_v) + loss_fn_c(out_c, lab_c)
loss.backward()
opt_v.step()
opt_c.step()
        # no `del img, lab` here: `lab` is inspected right after the loop, and both are rebound each iteration
        loss_arr.append(loss.item())
torch.max(lab[:, 0, :]).int() | code |
16154605/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from torch.utils.data import DataLoader, Dataset, random_split
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import os
import os
import torch
import torch
import torchvision.transforms as transforms
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import os
import os
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torchvision
from torch.utils.data import DataLoader, Dataset, random_split
import torchvision.transforms as transforms
class VowelConsonantDataset(Dataset):
def __init__(self, file_path, train=True, transform=None):
self.transform = transform
self.file_path = file_path
self.train = train
self.file_names = [file for _, _, files in os.walk(self.file_path) for file in files]
self.len = len(self.file_names)
if self.train:
self.classes_mapping = self.get_classes()
def __len__(self):
return len(self.file_names)
def __getitem__(self, index):
file_name = self.file_names[index]
image_data = self.pil_loader(self.file_path + '/' + file_name)
if self.transform:
image_data = self.transform(image_data)
if self.train:
file_name_splitted = file_name.split('_')
Y1 = self.classes_mapping[file_name_splitted[0]]
Y2 = self.classes_mapping[file_name_splitted[1]]
z1, z2 = (torch.zeros(10), torch.zeros(10))
z1[Y1 - 10], z2[Y2] = (1, 1)
label = torch.stack([z1, z2])
return (image_data, label)
else:
return (image_data, file_name)
def pil_loader(self, path):
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def get_classes(self):
classes = []
for name in self.file_names:
name_splitted = name.split('_')
classes.extend([name_splitted[0], name_splitted[1]])
classes = list(set(classes))
classes_mapping = {}
for i, cl in enumerate(sorted(classes)):
classes_mapping[cl] = i
return classes_mapping
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import matplotlib.pyplot as plt
from torchvision import datasets
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
train_on_gpu = torch.cuda.is_available()
transform = transforms.Compose([transforms.ToTensor()])
full_data = VowelConsonantDataset('../input/train/train', train=True, transform=transform)
train_size = int(0.9 * len(full_data))
test_size = len(full_data) - train_size
train_data, validation_data = random_split(full_data, [train_size, test_size])
train_loader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=60, shuffle=True)
for data in train_loader:
img, lab = data
print(img.shape)
im = np.transpose(img[0].numpy(), (1, 2, 0))
print(im.shape)
im = np.squeeze(im)
print(im.shape)
plt.imshow(im)
break | code |
16154605/cell_15 | [
"text_plain_output_1.png"
] | from PIL import Image
from torch.utils.data import DataLoader, Dataset, random_split
import os
import os
import torch
import torch
import torchvision.transforms as transforms
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import os
import os
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torchvision
from torch.utils.data import DataLoader, Dataset, random_split
import torchvision.transforms as transforms
class VowelConsonantDataset(Dataset):
def __init__(self, file_path, train=True, transform=None):
self.transform = transform
self.file_path = file_path
self.train = train
self.file_names = [file for _, _, files in os.walk(self.file_path) for file in files]
self.len = len(self.file_names)
if self.train:
self.classes_mapping = self.get_classes()
def __len__(self):
return len(self.file_names)
def __getitem__(self, index):
file_name = self.file_names[index]
image_data = self.pil_loader(self.file_path + '/' + file_name)
if self.transform:
image_data = self.transform(image_data)
if self.train:
file_name_splitted = file_name.split('_')
Y1 = self.classes_mapping[file_name_splitted[0]]
Y2 = self.classes_mapping[file_name_splitted[1]]
z1, z2 = (torch.zeros(10), torch.zeros(10))
z1[Y1 - 10], z2[Y2] = (1, 1)
label = torch.stack([z1, z2])
return (image_data, label)
else:
return (image_data, file_name)
def pil_loader(self, path):
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def get_classes(self):
classes = []
for name in self.file_names:
name_splitted = name.split('_')
classes.extend([name_splitted[0], name_splitted[1]])
classes = list(set(classes))
classes_mapping = {}
for i, cl in enumerate(sorted(classes)):
classes_mapping[cl] = i
return classes_mapping
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import matplotlib.pyplot as plt
from torchvision import datasets
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
train_on_gpu = torch.cuda.is_available()
transform = transforms.Compose([transforms.ToTensor()])
full_data = VowelConsonantDataset('../input/train/train', train=True, transform=transform)
train_size = int(0.9 * len(full_data))
test_size = len(full_data) - train_size
train_data, validation_data = random_split(full_data, [train_size, test_size])
train_loader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=60, shuffle=True)
test_data = VowelConsonantDataset('../input/test/test', train=False, transform=transform)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=60, shuffle=False)
print(lab[:, 0, :], torch.max(lab[:, 0, :], 1)) | code |
16154605/cell_16 | [
"text_plain_output_1.png"
] | from PIL import Image
from torch.utils.data import DataLoader, Dataset, random_split
from tqdm import tqdm_notebook
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import os
import os
import torch
import torch
import torch.nn as nn
import torch.nn as nn
import torch.optim as optim
import torch.optim as optim
import torchvision.models as models
import torchvision.transforms as transforms
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import os
import os
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torchvision
from torch.utils.data import DataLoader, Dataset, random_split
import torchvision.transforms as transforms
class VowelConsonantDataset(Dataset):
def __init__(self, file_path, train=True, transform=None):
self.transform = transform
self.file_path = file_path
self.train = train
self.file_names = [file for _, _, files in os.walk(self.file_path) for file in files]
self.len = len(self.file_names)
if self.train:
self.classes_mapping = self.get_classes()
def __len__(self):
return len(self.file_names)
def __getitem__(self, index):
file_name = self.file_names[index]
image_data = self.pil_loader(self.file_path + '/' + file_name)
if self.transform:
image_data = self.transform(image_data)
if self.train:
file_name_splitted = file_name.split('_')
Y1 = self.classes_mapping[file_name_splitted[0]]
Y2 = self.classes_mapping[file_name_splitted[1]]
z1, z2 = (torch.zeros(10), torch.zeros(10))
z1[Y1 - 10], z2[Y2] = (1, 1)
label = torch.stack([z1, z2])
return (image_data, label)
else:
return (image_data, file_name)
def pil_loader(self, path):
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def get_classes(self):
classes = []
for name in self.file_names:
name_splitted = name.split('_')
classes.extend([name_splitted[0], name_splitted[1]])
classes = list(set(classes))
classes_mapping = {}
for i, cl in enumerate(sorted(classes)):
classes_mapping[cl] = i
return classes_mapping
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import matplotlib.pyplot as plt
from torchvision import datasets
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
train_on_gpu = torch.cuda.is_available()
transform = transforms.Compose([transforms.ToTensor()])
full_data = VowelConsonantDataset('../input/train/train', train=True, transform=transform)
train_size = int(0.9 * len(full_data))
test_size = len(full_data) - train_size
train_data, validation_data = random_split(full_data, [train_size, test_size])
train_loader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=60, shuffle=True)
test_data = VowelConsonantDataset('../input/test/test', train=False, transform=transform)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=60, shuffle=False)
for data in train_loader:
img, lab = data
im = np.transpose(img[0].numpy(), (1, 2, 0))
im = np.squeeze(im)
break
model_v = models.resnet18()
model_c = models.resnet18()
model_c.fc = nn.Linear(512, 10, bias=True)
model_v.fc = nn.Linear(512, 10, bias=True)
opt_v = optim.Adam(model_v.parameters())
opt_c = optim.Adam(model_c.parameters())
loss_fn_v = nn.CrossEntropyLoss()
loss_fn_c = nn.CrossEntropyLoss()
max_epochs = 20
loss_arr = []
st = 'cuda:0'
model_v.to(st)
model_c.to(st)
for i in tqdm_notebook(range(max_epochs), total=max_epochs, unit='epochs'):
for data in tqdm_notebook(train_loader, total=len(train_loader), unit='batch'):
img, lab = data
img, lab = (img.to(st), lab.to(st))
out_v = model_v(img)
out_c = model_c(img)
opt_v.zero_grad()
opt_c.zero_grad()
val, ind = torch.max(lab[:, 0, :], 1)
val, ind1 = torch.max(lab[:, 1, :], 1)
lab_v = ind
lab_c = ind1
loss = loss_fn_v(out_v, lab_v) + loss_fn_c(out_c, lab_c)
loss.backward()
opt_v.step()
opt_c.step()
del img, lab
print(loss)
        loss_arr.append(loss.item()) | code
16154605/cell_17 | [
"text_plain_output_1.png"
] | print(net)  # NOTE: net is defined in a cell that is not part of this dump | code
16154605/cell_12 | [
"text_plain_output_1.png"
] | import torchvision.models as models
model_v = models.resnet18()
model_c = models.resnet18()
print(model_c) | code |
73059749/cell_30 | [
"image_output_1.png"
] | from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
checkpoint_callback = ModelCheckpoint(monitor='val_loss', filename='model-{epoch:02d}-{val_loss:.2f}', save_top_k=1, mode='min')
early_stop_callback = EarlyStopping(monitor='val_loss', patience=3, verbose=False, mode='min')
print(f'Best Model: {checkpoint_callback.best_model_path}')
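# reload the checkpoint with the lowest validation loss for inference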
inference_model = CountModel.load_from_checkpoint(checkpoint_callback.best_model_path) | code |
73059749/cell_44 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from pathlib import Path
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from sklearn.model_selection import train_test_split
from torch import nn
from torch.utils.data import Dataset, DataLoader, random_split
from torchmetrics.functional import accuracy, auroc
from torchmetrics.functional import mean_absolute_error, mean_squared_error
from torchvision import transforms
from tqdm import tqdm
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
RANDOM_STATE = 42
NUM_CLASSES = 98
MAX_HOURS = 90
BATCH_SIZE = 128
pl.seed_everything(RANDOM_STATE)
class CountBoxes(Dataset):
def __init__(self, df, transform=None):
super().__init__()
self.df = df
self.transform = transform
self.num_workers = 2
def __len__(self):
return self.df.shape[0]
def __getitem__(self, index):
img_id, label = (self.df.iloc[index]['image_id'], self.df.iloc[index]['count'])
img_path = f'../input/count-the-blue-boxes/train/train/{label}/{label}_{img_id}.png'
image = Image.open(img_path)
image = image.convert('RGB')
if self.transform is not None:
image = self.transform(image)
return (image, label)
class CountBoxesDataModule(pl.LightningDataModule):
def __init__(self, path, batch_size=64, num_workers=4):
super().__init__()
self.path = Path(path)
self.batch_size = batch_size
self.num_workers = num_workers
self.transform = transforms.Compose([transforms.Resize(size=(250, 500)), transforms.ToTensor()])
def prepare_data(self):
df = pd.read_csv(self.path / 'train.csv')
df = df.loc[df['count'] < MAX_HOURS]
X_train, X_valid, y_train, y_valid = train_test_split(df['image_id'], df['count'], stratify=df['count'], test_size=0.2)
train_df = pd.concat([X_train, y_train], axis=1)
valid_df = pd.concat([X_valid, y_valid], axis=1)
train_df.to_pickle('train_df.pkl')
valid_df.to_pickle('valid_df.pkl')
def setup(self):
self.train_df = pd.read_pickle('train_df.pkl')
self.valid_df = pd.read_pickle('valid_df.pkl')
self.train_transform = self.transform
self.valid_transform = self.transform
def train_dataloader(self):
train_dataset = CountBoxes(df=self.train_df, transform=self.train_transform)
return DataLoader(train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True, pin_memory=True)
def val_dataloader(self):
valid_dataset = CountBoxes(df=self.valid_df, transform=self.valid_transform)
return DataLoader(valid_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False, pin_memory=True)
class CountModel(pl.LightningModule):
def __init__(self, input_shape, num_classes, learning_rate=0.0002):
super().__init__()
self.save_hyperparameters()
self.num_classes = num_classes
self.learning_rate = learning_rate
self.conv1 = nn.Conv2d(3, 16, 5, 1)
self.conv2 = nn.Conv2d(16, 32, 5, 1)
self.conv3 = nn.Conv2d(32, 64, 5, 1)
self.conv4 = nn.Conv2d(64, 128, 5, 1)
self.pool1 = torch.nn.MaxPool2d(2)
self.pool2 = torch.nn.MaxPool2d(2)
self.pool3 = torch.nn.MaxPool2d(2)
n_sizes = self._get_conv_output(input_shape)
self.fc1 = nn.Linear(n_sizes, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, self.num_classes)
def _get_conv_output(self, shape):
batch_size = 1
input = torch.autograd.Variable(torch.rand(batch_size, *shape))
output_feat = self._forward_features(input)
n_size = output_feat.data.view(batch_size, -1).size(1)
return n_size
def _forward_features(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.pool1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.pool2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.pool3(x))
x = F.relu(self.conv4(x))
return x
def forward(self, x):
x = self._forward_features(x)
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.log_softmax(self.fc3(x), dim=1)
return x
def training_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
mae = mean_absolute_error(preds, y)
mse = mean_squared_error(preds, y)
self.log('train_loss', loss, on_step=True, on_epoch=True, logger=True)
self.log('train_acc', acc, on_step=True, on_epoch=True, logger=True)
self.log('train_mae', mae, on_step=True, on_epoch=True, logger=True)
self.log('train_mse', mse, on_step=True, on_epoch=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
mae = mean_absolute_error(preds, y)
mse = mean_squared_error(preds, y)
self.log('val_loss', loss, on_step=True, on_epoch=True, logger=True)
self.log('val_acc', acc, on_step=True, on_epoch=True, logger=True)
self.log('val_mae', mae, on_step=True, on_epoch=True, logger=True)
self.log('val_mse', mse, on_step=True, on_epoch=True, logger=True)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
checkpoint_callback = ModelCheckpoint(monitor='val_loss', filename='model-{epoch:02d}-{val_loss:.2f}', save_top_k=1, mode='min')
early_stop_callback = EarlyStopping(monitor='val_loss', patience=3, verbose=False, mode='min')
inference_model = CountModel.load_from_checkpoint(checkpoint_callback.best_model_path)
inference_model = inference_model.to('cuda')
inference_model.eval()
inference_model.freeze()
def get_predictions(dataloader):
y_preds, y_actuals = ([], [])
for imgs, labels in tqdm(dataloader):
logits = inference_model(imgs.to('cuda'))
preds = torch.argmax(logits, dim=1)
y_actuals.extend(labels.numpy().tolist())
y_preds.extend(preds.cpu().detach().numpy().tolist())
return (y_preds, y_actuals)
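# dataset for the unlabeled test images: same resize/ToTensor pipeline, but returns only the image tensor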
class TestDataset(torch.utils.data.Dataset):
def __init__(self, image_paths):
self.image_paths = image_paths
self.transform = transforms.Compose([transforms.Resize(size=(250, 500)), transforms.ToTensor()])
def __len__(self):
return len(self.image_paths)
def __getitem__(self, idx):
img_loc = self.image_paths[idx]
image = Image.open(img_loc)
image = image.convert('RGB')
image = self.transform(image)
return image
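# map each test image_id to its file path and wrap the list in the inference dataset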
submission = pd.read_csv('../input/count-the-blue-boxes/sample_submission.csv')
submission['image_path'] = submission['image_id'].apply(lambda x: f'../input/count-the-blue-boxes/test/test/{x}.png')
test_dataset = TestDataset(submission['image_path'].tolist())
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=BATCH_SIZE, num_workers=2)
print(f'Test Dataset: {len(test_dataset)}\tTest DataLoader: {len(test_dataloader)}') | code |
73059749/cell_20 | [
"text_plain_output_1.png"
] | from PIL import Image
from pathlib import Path
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import transforms
import matplotlib.pyplot as plt
import pandas as pd
import pytorch_lightning as pl
RANDOM_STATE = 42
NUM_CLASSES = 98
MAX_HOURS = 90
BATCH_SIZE = 128
pl.seed_everything(RANDOM_STATE)
class CountBoxes(Dataset):
def __init__(self, df, transform=None):
super().__init__()
self.df = df
self.transform = transform
self.num_workers = 2
def __len__(self):
return self.df.shape[0]
def __getitem__(self, index):
img_id, label = (self.df.iloc[index]['image_id'], self.df.iloc[index]['count'])
img_path = f'../input/count-the-blue-boxes/train/train/{label}/{label}_{img_id}.png'
image = Image.open(img_path)
image = image.convert('RGB')
if self.transform is not None:
image = self.transform(image)
return (image, label)
class CountBoxesDataModule(pl.LightningDataModule):
def __init__(self, path, batch_size=64, num_workers=4):
super().__init__()
self.path = Path(path)
self.batch_size = batch_size
self.num_workers = num_workers
self.transform = transforms.Compose([transforms.Resize(size=(250, 500)), transforms.ToTensor()])
def prepare_data(self):
df = pd.read_csv(self.path / 'train.csv')
df = df.loc[df['count'] < MAX_HOURS]
X_train, X_valid, y_train, y_valid = train_test_split(df['image_id'], df['count'], stratify=df['count'], test_size=0.2)
train_df = pd.concat([X_train, y_train], axis=1)
valid_df = pd.concat([X_valid, y_valid], axis=1)
train_df.to_pickle('train_df.pkl')
valid_df.to_pickle('valid_df.pkl')
def setup(self):
self.train_df = pd.read_pickle('train_df.pkl')
self.valid_df = pd.read_pickle('valid_df.pkl')
self.train_transform = self.transform
self.valid_transform = self.transform
def train_dataloader(self):
train_dataset = CountBoxes(df=self.train_df, transform=self.train_transform)
return DataLoader(train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True, pin_memory=True)
def val_dataloader(self):
valid_dataset = CountBoxes(df=self.valid_df, transform=self.valid_transform)
return DataLoader(valid_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False, pin_memory=True)
datamodule = CountBoxesDataModule(path='../input/count-the-blue-boxes/', batch_size=BATCH_SIZE)
datamodule.prepare_data()
datamodule.setup()
train_dataloader = datamodule.train_dataloader()
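# preview one training batch as a 5x5 grid, titling each image with its box count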
for data in train_dataloader:
images, labels = data
f, ax = plt.subplots(5, 5, figsize=(30, 30))
for i in range(5*5):
ax[i // 5, i % 5].imshow(images[i].permute(1, 2, 0))
ax[i // 5, i % 5].axis("off")
ax[i // 5, i % 5].set_title(labels[i], fontdict={"fontsize": 20})
plt.tight_layout()
plt.show()
break
valid_dataloader = datamodule.val_dataloader()
for data in valid_dataloader:
images, labels = data
f, ax = plt.subplots(5, 5, figsize=(30, 30))
for i in range(5 * 5):
ax[i // 5, i % 5].imshow(images[i].permute(1, 2, 0))
ax[i // 5, i % 5].axis('off')
ax[i // 5, i % 5].set_title(labels[i], fontdict={'fontsize': 20})
plt.tight_layout()
plt.show()
break | code |
73059749/cell_6 | [
"text_plain_output_1.png"
] | from kaggle_secrets import UserSecretsClient
import wandb
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
wandb_api = user_secrets.get_secret('wandb-key')
wandb.login(key=wandb_api) | code |
73059749/cell_40 | [
"text_plain_output_1.png"
] | from PIL import Image
from pathlib import Path
from sklearn import metrics
from sklearn.model_selection import train_test_split
from torch import nn
from torch.utils.data import Dataset, DataLoader, random_split
from torchmetrics.functional import accuracy, auroc
from torchmetrics.functional import mean_absolute_error, mean_squared_error
from torchvision import transforms
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
RANDOM_STATE = 42
NUM_CLASSES = 98
MAX_HOURS = 90
BATCH_SIZE = 128
pl.seed_everything(RANDOM_STATE)
class CountBoxes(Dataset):
def __init__(self, df, transform=None):
super().__init__()
self.df = df
self.transform = transform
self.num_workers = 2
def __len__(self):
return self.df.shape[0]
def __getitem__(self, index):
img_id, label = (self.df.iloc[index]['image_id'], self.df.iloc[index]['count'])
img_path = f'../input/count-the-blue-boxes/train/train/{label}/{label}_{img_id}.png'
image = Image.open(img_path)
image = image.convert('RGB')
if self.transform is not None:
image = self.transform(image)
return (image, label)
class CountBoxesDataModule(pl.LightningDataModule):
def __init__(self, path, batch_size=64, num_workers=4):
super().__init__()
self.path = Path(path)
self.batch_size = batch_size
self.num_workers = num_workers
self.transform = transforms.Compose([transforms.Resize(size=(250, 500)), transforms.ToTensor()])
def prepare_data(self):
df = pd.read_csv(self.path / 'train.csv')
df = df.loc[df['count'] < MAX_HOURS]
X_train, X_valid, y_train, y_valid = train_test_split(df['image_id'], df['count'], stratify=df['count'], test_size=0.2)
train_df = pd.concat([X_train, y_train], axis=1)
valid_df = pd.concat([X_valid, y_valid], axis=1)
train_df.to_pickle('train_df.pkl')
valid_df.to_pickle('valid_df.pkl')
def setup(self):
self.train_df = pd.read_pickle('train_df.pkl')
self.valid_df = pd.read_pickle('valid_df.pkl')
self.train_transform = self.transform
self.valid_transform = self.transform
def train_dataloader(self):
train_dataset = CountBoxes(df=self.train_df, transform=self.train_transform)
return DataLoader(train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True, pin_memory=True)
def val_dataloader(self):
valid_dataset = CountBoxes(df=self.valid_df, transform=self.valid_transform)
return DataLoader(valid_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False, pin_memory=True)
class CountModel(pl.LightningModule):
def __init__(self, input_shape, num_classes, learning_rate=0.0002):
super().__init__()
self.save_hyperparameters()
self.num_classes = num_classes
self.learning_rate = learning_rate
self.conv1 = nn.Conv2d(3, 16, 5, 1)
self.conv2 = nn.Conv2d(16, 32, 5, 1)
self.conv3 = nn.Conv2d(32, 64, 5, 1)
self.conv4 = nn.Conv2d(64, 128, 5, 1)
self.pool1 = torch.nn.MaxPool2d(2)
self.pool2 = torch.nn.MaxPool2d(2)
self.pool3 = torch.nn.MaxPool2d(2)
n_sizes = self._get_conv_output(input_shape)
self.fc1 = nn.Linear(n_sizes, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, self.num_classes)
def _get_conv_output(self, shape):
batch_size = 1
input = torch.autograd.Variable(torch.rand(batch_size, *shape))
output_feat = self._forward_features(input)
n_size = output_feat.data.view(batch_size, -1).size(1)
return n_size
def _forward_features(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.pool1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.pool2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.pool3(x))
x = F.relu(self.conv4(x))
return x
def forward(self, x):
x = self._forward_features(x)
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.log_softmax(self.fc3(x), dim=1)
return x
def training_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
mae = mean_absolute_error(preds, y)
mse = mean_squared_error(preds, y)
self.log('train_loss', loss, on_step=True, on_epoch=True, logger=True)
self.log('train_acc', acc, on_step=True, on_epoch=True, logger=True)
self.log('train_mae', mae, on_step=True, on_epoch=True, logger=True)
self.log('train_mse', mse, on_step=True, on_epoch=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
mae = mean_absolute_error(preds, y)
mse = mean_squared_error(preds, y)
self.log('val_loss', loss, on_step=True, on_epoch=True, logger=True)
self.log('val_acc', acc, on_step=True, on_epoch=True, logger=True)
self.log('val_mae', mae, on_step=True, on_epoch=True, logger=True)
self.log('val_mse', mse, on_step=True, on_epoch=True, logger=True)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
def print_metrics(stage, y_true, y_pred):
    acc = metrics.accuracy_score(y_true, y_pred)
    mae = metrics.mean_absolute_error(y_true, y_pred)
    mse = metrics.mean_squared_error(y_true, y_pred)
    print(f'{stage} -> accuracy: {acc:.4f}, MAE: {mae:.4f}, MSE: {mse:.4f}')
print_metrics('Training', train_act, train_pred) | code
73059749/cell_39 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from pathlib import Path
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from sklearn.model_selection import train_test_split
from torch import nn
from torch.utils.data import Dataset, DataLoader, random_split
from torchmetrics.functional import accuracy, auroc
from torchmetrics.functional import mean_absolute_error, mean_squared_error
from torchvision import transforms
from tqdm import tqdm
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
RANDOM_STATE = 42
NUM_CLASSES = 98
MAX_HOURS = 90
BATCH_SIZE = 128
pl.seed_everything(RANDOM_STATE)
class CountBoxes(Dataset):
def __init__(self, df, transform=None):
super().__init__()
self.df = df
self.transform = transform
self.num_workers = 2
def __len__(self):
return self.df.shape[0]
def __getitem__(self, index):
img_id, label = (self.df.iloc[index]['image_id'], self.df.iloc[index]['count'])
img_path = f'../input/count-the-blue-boxes/train/train/{label}/{label}_{img_id}.png'
image = Image.open(img_path)
image = image.convert('RGB')
if self.transform is not None:
image = self.transform(image)
return (image, label)
class CountBoxesDataModule(pl.LightningDataModule):
def __init__(self, path, batch_size=64, num_workers=4):
super().__init__()
self.path = Path(path)
self.batch_size = batch_size
self.num_workers = num_workers
self.transform = transforms.Compose([transforms.Resize(size=(250, 500)), transforms.ToTensor()])
def prepare_data(self):
df = pd.read_csv(self.path / 'train.csv')
df = df.loc[df['count'] < MAX_HOURS]
X_train, X_valid, y_train, y_valid = train_test_split(df['image_id'], df['count'], stratify=df['count'], test_size=0.2)
train_df = pd.concat([X_train, y_train], axis=1)
valid_df = pd.concat([X_valid, y_valid], axis=1)
train_df.to_pickle('train_df.pkl')
valid_df.to_pickle('valid_df.pkl')
def setup(self):
self.train_df = pd.read_pickle('train_df.pkl')
self.valid_df = pd.read_pickle('valid_df.pkl')
self.train_transform = self.transform
self.valid_transform = self.transform
def train_dataloader(self):
train_dataset = CountBoxes(df=self.train_df, transform=self.train_transform)
return DataLoader(train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True, pin_memory=True)
def val_dataloader(self):
valid_dataset = CountBoxes(df=self.valid_df, transform=self.valid_transform)
return DataLoader(valid_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False, pin_memory=True)
datamodule = CountBoxesDataModule(path='../input/count-the-blue-boxes/', batch_size=BATCH_SIZE)
datamodule.prepare_data()
datamodule.setup()
train_dataloader = datamodule.train_dataloader()
valid_dataloader = datamodule.val_dataloader()
class CountModel(pl.LightningModule):
def __init__(self, input_shape, num_classes, learning_rate=0.0002):
super().__init__()
self.save_hyperparameters()
self.num_classes = num_classes
self.learning_rate = learning_rate
self.conv1 = nn.Conv2d(3, 16, 5, 1)
self.conv2 = nn.Conv2d(16, 32, 5, 1)
self.conv3 = nn.Conv2d(32, 64, 5, 1)
self.conv4 = nn.Conv2d(64, 128, 5, 1)
self.pool1 = torch.nn.MaxPool2d(2)
self.pool2 = torch.nn.MaxPool2d(2)
self.pool3 = torch.nn.MaxPool2d(2)
n_sizes = self._get_conv_output(input_shape)
self.fc1 = nn.Linear(n_sizes, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, self.num_classes)
def _get_conv_output(self, shape):
batch_size = 1
input = torch.autograd.Variable(torch.rand(batch_size, *shape))
output_feat = self._forward_features(input)
n_size = output_feat.data.view(batch_size, -1).size(1)
return n_size
def _forward_features(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.pool1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.pool2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.pool3(x))
x = F.relu(self.conv4(x))
return x
def forward(self, x):
x = self._forward_features(x)
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.log_softmax(self.fc3(x), dim=1)
return x
def training_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
mae = mean_absolute_error(preds, y)
mse = mean_squared_error(preds, y)
self.log('train_loss', loss, on_step=True, on_epoch=True, logger=True)
self.log('train_acc', acc, on_step=True, on_epoch=True, logger=True)
self.log('train_mae', mae, on_step=True, on_epoch=True, logger=True)
self.log('train_mse', mse, on_step=True, on_epoch=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
mae = mean_absolute_error(preds, y)
mse = mean_squared_error(preds, y)
self.log('val_loss', loss, on_step=True, on_epoch=True, logger=True)
self.log('val_acc', acc, on_step=True, on_epoch=True, logger=True)
self.log('val_mae', mae, on_step=True, on_epoch=True, logger=True)
self.log('val_mse', mse, on_step=True, on_epoch=True, logger=True)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
checkpoint_callback = ModelCheckpoint(monitor='val_loss', filename='model-{epoch:02d}-{val_loss:.2f}', save_top_k=1, mode='min')
early_stop_callback = EarlyStopping(monitor='val_loss', patience=3, verbose=False, mode='min')
inference_model = CountModel.load_from_checkpoint(checkpoint_callback.best_model_path)
inference_model = inference_model.to('cuda')
inference_model.eval()
inference_model.freeze()
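# run the frozen model over a dataloader, collecting argmax predictions alongside the ground-truth labels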
def get_predictions(dataloader):
y_preds, y_actuals = ([], [])
for imgs, labels in tqdm(dataloader):
logits = inference_model(imgs.to('cuda'))
preds = torch.argmax(logits, dim=1)
y_actuals.extend(labels.numpy().tolist())
y_preds.extend(preds.cpu().detach().numpy().tolist())
return (y_preds, y_actuals)
valid_dataloader = datamodule.val_dataloader()
train_dataloader = datamodule.train_dataloader()
train_pred, train_act = get_predictions(train_dataloader) | code |
73059749/cell_2 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | !pip install -q wandb | code |
73059749/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from kaggle_secrets import UserSecretsClient
from pytorch_lightning.loggers import WandbLogger
import wandb
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
wandb_api = user_secrets.get_secret('wandb-key')
wandb.login(key=wandb_api)
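# start a W&B run and attach a Lightning logger so training metrics stream to it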
wandb.init(project='count-the-green-boxes')
wandb_logger = WandbLogger(project='count-green-boxes-lightning', job_type='train') | code |
73059749/cell_28 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from PIL import Image
from kaggle_secrets import UserSecretsClient
from pathlib import Path
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.loggers import WandbLogger
from sklearn.model_selection import train_test_split
from torch import nn
from torch.utils.data import Dataset, DataLoader, random_split
from torchmetrics.functional import accuracy, auroc
from torchmetrics.functional import mean_absolute_error, mean_squared_error
from torchvision import transforms
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import wandb
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
wandb_api = user_secrets.get_secret('wandb-key')
wandb.login(key=wandb_api)
wandb.init(project='count-the-green-boxes')
wandb_logger = WandbLogger(project='count-green-boxes-lightning', job_type='train')
RANDOM_STATE = 42
NUM_CLASSES = 98
MAX_HOURS = 90
BATCH_SIZE = 128
pl.seed_everything(RANDOM_STATE)
class CountBoxes(Dataset):
def __init__(self, df, transform=None):
super().__init__()
self.df = df
self.transform = transform
self.num_workers = 2
def __len__(self):
return self.df.shape[0]
def __getitem__(self, index):
img_id, label = (self.df.iloc[index]['image_id'], self.df.iloc[index]['count'])
img_path = f'../input/count-the-blue-boxes/train/train/{label}/{label}_{img_id}.png'
image = Image.open(img_path)
image = image.convert('RGB')
if self.transform is not None:
image = self.transform(image)
return (image, label)
class CountBoxesDataModule(pl.LightningDataModule):
def __init__(self, path, batch_size=64, num_workers=4):
super().__init__()
self.path = Path(path)
self.batch_size = batch_size
self.num_workers = num_workers
self.transform = transforms.Compose([transforms.Resize(size=(250, 500)), transforms.ToTensor()])
def prepare_data(self):
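        # drop counts at or above MAX_HOURS, make a stratified 80/20 split, and cache both frames as pickles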
df = pd.read_csv(self.path / 'train.csv')
df = df.loc[df['count'] < MAX_HOURS]
X_train, X_valid, y_train, y_valid = train_test_split(df['image_id'], df['count'], stratify=df['count'], test_size=0.2)
train_df = pd.concat([X_train, y_train], axis=1)
valid_df = pd.concat([X_valid, y_valid], axis=1)
train_df.to_pickle('train_df.pkl')
valid_df.to_pickle('valid_df.pkl')
def setup(self):
self.train_df = pd.read_pickle('train_df.pkl')
self.valid_df = pd.read_pickle('valid_df.pkl')
self.train_transform = self.transform
self.valid_transform = self.transform
def train_dataloader(self):
train_dataset = CountBoxes(df=self.train_df, transform=self.train_transform)
return DataLoader(train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True, pin_memory=True)
def val_dataloader(self):
valid_dataset = CountBoxes(df=self.valid_df, transform=self.valid_transform)
return DataLoader(valid_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False, pin_memory=True)
datamodule = CountBoxesDataModule(path='../input/count-the-blue-boxes/', batch_size=BATCH_SIZE)
datamodule.prepare_data()
datamodule.setup()
train_dataloader = datamodule.train_dataloader()
valid_dataloader = datamodule.val_dataloader()
class CountModel(pl.LightningModule):
def __init__(self, input_shape, num_classes, learning_rate=0.0002):
super().__init__()
self.save_hyperparameters()
self.num_classes = num_classes
self.learning_rate = learning_rate
self.conv1 = nn.Conv2d(3, 16, 5, 1)
self.conv2 = nn.Conv2d(16, 32, 5, 1)
self.conv3 = nn.Conv2d(32, 64, 5, 1)
self.conv4 = nn.Conv2d(64, 128, 5, 1)
self.pool1 = torch.nn.MaxPool2d(2)
self.pool2 = torch.nn.MaxPool2d(2)
self.pool3 = torch.nn.MaxPool2d(2)
n_sizes = self._get_conv_output(input_shape)
self.fc1 = nn.Linear(n_sizes, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, self.num_classes)
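    # pass a dummy batch through the conv stack to infer the flattened feature size feeding fc1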
def _get_conv_output(self, shape):
batch_size = 1
input = torch.autograd.Variable(torch.rand(batch_size, *shape))
output_feat = self._forward_features(input)
n_size = output_feat.data.view(batch_size, -1).size(1)
return n_size
def _forward_features(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.pool1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.pool2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.pool3(x))
x = F.relu(self.conv4(x))
return x
def forward(self, x):
x = self._forward_features(x)
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.log_softmax(self.fc3(x), dim=1)
return x
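    # training and validation steps share the same recipe: NLL loss on the log-softmax outputs, plus accuracy/MAE/MSE logging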
def training_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
mae = mean_absolute_error(preds, y)
mse = mean_squared_error(preds, y)
self.log('train_loss', loss, on_step=True, on_epoch=True, logger=True)
self.log('train_acc', acc, on_step=True, on_epoch=True, logger=True)
self.log('train_mae', mae, on_step=True, on_epoch=True, logger=True)
self.log('train_mse', mse, on_step=True, on_epoch=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
mae = mean_absolute_error(preds, y)
mse = mean_squared_error(preds, y)
self.log('val_loss', loss, on_step=True, on_epoch=True, logger=True)
self.log('val_acc', acc, on_step=True, on_epoch=True, logger=True)
self.log('val_mae', mae, on_step=True, on_epoch=True, logger=True)
self.log('val_mse', mse, on_step=True, on_epoch=True, logger=True)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
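# keep only the best checkpoint by validation loss and stop early after 3 epochs without improvement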
checkpoint_callback = ModelCheckpoint(monitor='val_loss', filename='model-{epoch:02d}-{val_loss:.2f}', save_top_k=1, mode='min')
early_stop_callback = EarlyStopping(monitor='val_loss', patience=3, verbose=False, mode='min')
model = CountModel((3, 250, 500), NUM_CLASSES)
trainer = pl.Trainer(max_epochs=3, progress_bar_refresh_rate=5, gpus=1, callbacks=[checkpoint_callback, early_stop_callback], logger=wandb_logger)
trainer.fit(model, datamodule) | code |
73059749/cell_17 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from pathlib import Path
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import transforms
import matplotlib.pyplot as plt
import pandas as pd
import pytorch_lightning as pl
RANDOM_STATE = 42
NUM_CLASSES = 98
MAX_HOURS = 90
BATCH_SIZE = 128
pl.seed_everything(RANDOM_STATE)
class CountBoxes(Dataset):
def __init__(self, df, transform=None):
super().__init__()
self.df = df
self.transform = transform
self.num_workers = 2
def __len__(self):
return self.df.shape[0]
def __getitem__(self, index):
img_id, label = (self.df.iloc[index]['image_id'], self.df.iloc[index]['count'])
img_path = f'../input/count-the-blue-boxes/train/train/{label}/{label}_{img_id}.png'
image = Image.open(img_path)
image = image.convert('RGB')
if self.transform is not None:
image = self.transform(image)
return (image, label)
class CountBoxesDataModule(pl.LightningDataModule):
def __init__(self, path, batch_size=64, num_workers=4):
super().__init__()
self.path = Path(path)
self.batch_size = batch_size
self.num_workers = num_workers
self.transform = transforms.Compose([transforms.Resize(size=(250, 500)), transforms.ToTensor()])
def prepare_data(self):
df = pd.read_csv(self.path / 'train.csv')
df = df.loc[df['count'] < MAX_HOURS]
X_train, X_valid, y_train, y_valid = train_test_split(df['image_id'], df['count'], stratify=df['count'], test_size=0.2)
train_df = pd.concat([X_train, y_train], axis=1)
valid_df = pd.concat([X_valid, y_valid], axis=1)
train_df.to_pickle('train_df.pkl')
valid_df.to_pickle('valid_df.pkl')
def setup(self):
self.train_df = pd.read_pickle('train_df.pkl')
self.valid_df = pd.read_pickle('valid_df.pkl')
self.train_transform = self.transform
self.valid_transform = self.transform
def train_dataloader(self):
train_dataset = CountBoxes(df=self.train_df, transform=self.train_transform)
return DataLoader(train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True, pin_memory=True)
def val_dataloader(self):
valid_dataset = CountBoxes(df=self.valid_df, transform=self.valid_transform)
return DataLoader(valid_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False, pin_memory=True)
datamodule = CountBoxesDataModule(path='../input/count-the-blue-boxes/', batch_size=BATCH_SIZE)
datamodule.prepare_data()
datamodule.setup()
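# Illustrative sanity check (an addition, not in the original cell): after
# setup() the pickled frames are loaded, so a dataset built from train_df
# should yield (3, 250, 500) tensors per the Resize transform above.
demo_img, demo_label = CountBoxes(df=datamodule.train_df, transform=datamodule.transform)[0]
print('demo sample:', demo_img.shape, '-> count =', demo_label)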
train_dataloader = datamodule.train_dataloader()
for data in train_dataloader:
images, labels = data
f, ax = plt.subplots(5, 5, figsize=(30, 30))
for i in range(5 * 5):
ax[i // 5, i % 5].imshow(images[i].permute(1, 2, 0))
ax[i // 5, i % 5].axis('off')
        ax[i // 5, i % 5].set_title(labels[i].item(), fontdict={'fontsize': 20})
plt.tight_layout()
plt.show()
break | code |
73059749/cell_10 | [
"text_plain_output_1.png"
] | import pytorch_lightning as pl
RANDOM_STATE = 42
NUM_CLASSES = 98
MAX_HOURS = 90
BATCH_SIZE = 128
pl.seed_everything(RANDOM_STATE) | code |
73059749/cell_37 | [
"text_plain_output_1.png"
] | from PIL import Image
from pathlib import Path
from sklearn import metrics
from sklearn.model_selection import train_test_split
from torch import nn
from torch.utils.data import Dataset, DataLoader, random_split
from torchmetrics.functional import accuracy, auroc
from torchmetrics.functional import mean_absolute_error, mean_squared_error
from torchvision import transforms
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
RANDOM_STATE = 42
NUM_CLASSES = 98
MAX_HOURS = 90
BATCH_SIZE = 128
pl.seed_everything(RANDOM_STATE)
class CountBoxes(Dataset):
def __init__(self, df, transform=None):
super().__init__()
self.df = df
self.transform = transform
self.num_workers = 2
def __len__(self):
return self.df.shape[0]
def __getitem__(self, index):
img_id, label = (self.df.iloc[index]['image_id'], self.df.iloc[index]['count'])
img_path = f'../input/count-the-blue-boxes/train/train/{label}/{label}_{img_id}.png'
image = Image.open(img_path)
image = image.convert('RGB')
if self.transform is not None:
image = self.transform(image)
return (image, label)
class CountBoxesDataModule(pl.LightningDataModule):
def __init__(self, path, batch_size=64, num_workers=4):
super().__init__()
self.path = Path(path)
self.batch_size = batch_size
self.num_workers = num_workers
self.transform = transforms.Compose([transforms.Resize(size=(250, 500)), transforms.ToTensor()])
def prepare_data(self):
df = pd.read_csv(self.path / 'train.csv')
df = df.loc[df['count'] < MAX_HOURS]
X_train, X_valid, y_train, y_valid = train_test_split(df['image_id'], df['count'], stratify=df['count'], test_size=0.2)
train_df = pd.concat([X_train, y_train], axis=1)
valid_df = pd.concat([X_valid, y_valid], axis=1)
train_df.to_pickle('train_df.pkl')
valid_df.to_pickle('valid_df.pkl')
    def setup(self, stage=None):
self.train_df = pd.read_pickle('train_df.pkl')
self.valid_df = pd.read_pickle('valid_df.pkl')
self.train_transform = self.transform
self.valid_transform = self.transform
def train_dataloader(self):
train_dataset = CountBoxes(df=self.train_df, transform=self.train_transform)
return DataLoader(train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True, pin_memory=True)
def val_dataloader(self):
valid_dataset = CountBoxes(df=self.valid_df, transform=self.valid_transform)
return DataLoader(valid_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False, pin_memory=True)
class CountModel(pl.LightningModule):
def __init__(self, input_shape, num_classes, learning_rate=0.0002):
super().__init__()
self.save_hyperparameters()
self.num_classes = num_classes
self.learning_rate = learning_rate
self.conv1 = nn.Conv2d(3, 16, 5, 1)
self.conv2 = nn.Conv2d(16, 32, 5, 1)
self.conv3 = nn.Conv2d(32, 64, 5, 1)
self.conv4 = nn.Conv2d(64, 128, 5, 1)
self.pool1 = torch.nn.MaxPool2d(2)
self.pool2 = torch.nn.MaxPool2d(2)
self.pool3 = torch.nn.MaxPool2d(2)
n_sizes = self._get_conv_output(input_shape)
self.fc1 = nn.Linear(n_sizes, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, self.num_classes)
def _get_conv_output(self, shape):
batch_size = 1
        dummy = torch.rand(batch_size, *shape)
        output_feat = self._forward_features(dummy)
n_size = output_feat.data.view(batch_size, -1).size(1)
return n_size
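    # _get_conv_output runs one dummy batch through the conv stack so the
    # flattened feature count for fc1 is inferred automatically rather than
    # hand-computed for the (3, 250, 500) input.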
def _forward_features(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.pool1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.pool2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.pool3(x))
x = F.relu(self.conv4(x))
return x
def forward(self, x):
x = self._forward_features(x)
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.log_softmax(self.fc3(x), dim=1)
return x
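    # forward() returns log-probabilities (log_softmax), which is why the
    # steps below use F.nll_loss; the combination is equivalent to
    # F.cross_entropy on raw scores.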
def training_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
mae = mean_absolute_error(preds, y)
mse = mean_squared_error(preds, y)
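        # Because the classes are literal box counts, MAE/MSE on the argmax
        # predictions measure how far off the predicted count is, not merely
        # whether it is exactly right.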
self.log('train_loss', loss, on_step=True, on_epoch=True, logger=True)
self.log('train_acc', acc, on_step=True, on_epoch=True, logger=True)
self.log('train_mae', mae, on_step=True, on_epoch=True, logger=True)
self.log('train_mse', mse, on_step=True, on_epoch=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
mae = mean_absolute_error(preds, y)
mse = mean_squared_error(preds, y)
self.log('val_loss', loss, on_step=True, on_epoch=True, logger=True)
self.log('val_acc', acc, on_step=True, on_epoch=True, logger=True)
self.log('val_mae', mae, on_step=True, on_epoch=True, logger=True)
self.log('val_mse', mse, on_step=True, on_epoch=True, logger=True)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
def print_metrics(stage, y_true, y_pred):
    acc = metrics.accuracy_score(y_true, y_pred)
    mae = metrics.mean_absolute_error(y_true, y_pred)
    mse = metrics.mean_squared_error(y_true, y_pred)
    print(f'{stage} -> accuracy: {acc:.4f} | MAE: {mae:.4f} | MSE: {mse:.4f}')
# valid_act / valid_pred come from an inference cell that is not part of this snippet.
print_metrics('Validation', valid_act, valid_pred) | code |
72101516/cell_21 | [
"text_html_output_1.png"
] | from plotly.offline import init_notebook_mode, iplot, plot
import pandas as pd
import plotly.graph_objs as go
medal = pd.read_excel('../input/2021-olympics-in-tokyo/Medals.xlsx', index_col=0)
athlete = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx', index_col=0)
gender = pd.read_excel('../input/2021-olympics-in-tokyo/EntriesGender.xlsx', index_col=0)
team = pd.read_excel('../input/2021-olympics-in-tokyo/Teams.xlsx', index_col=0)
coach = pd.read_excel('../input/2021-olympics-in-tokyo/Coaches.xlsx', index_col=0)
gender['male_ratio'] = gender['Male'] / gender['Total']
gender['female_ratio'] = gender['Female'] / gender['Total']
gender['Discipline'] = gender.index
from plotly.offline import init_notebook_mode, iplot, plot
import plotly as py
init_notebook_mode(connected=True)
import plotly.graph_objs as go
fig = go.Figure()
fig.add_trace(go.Bar(y=gender.Discipline, x=gender.female_ratio, orientation='h', name='Females'))
fig.add_trace(go.Bar(y=gender.Discipline, x=gender.male_ratio, orientation='h', name='Males'))
template = dict(layout=go.Layout(title_font=dict(family='Rockwell', size=30)))
fig.update_layout(title='Distribution of disciplines based on gender', template=template, barmode='stack', autosize=False, width=680, height=900, margin=dict(l=150, r=100, b=30, t=100, pad=4))
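# Render the 0-1 ratio values on the x-axis as whole percentages.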
fig.layout.xaxis.tickformat = ',.0%'
fig.show() | code |
72101516/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
medal = pd.read_excel('../input/2021-olympics-in-tokyo/Medals.xlsx', index_col=0)
athlete = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx', index_col=0)
gender = pd.read_excel('../input/2021-olympics-in-tokyo/EntriesGender.xlsx', index_col=0)
team = pd.read_excel('../input/2021-olympics-in-tokyo/Teams.xlsx', index_col=0)
coach = pd.read_excel('../input/2021-olympics-in-tokyo/Coaches.xlsx', index_col=0)
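# miss() tabulates each column's null count alongside its percentage of rows.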
def miss(data):
missing_value = data.isnull().sum().sort_values(ascending=False)
missing_perc = (data.isnull().sum() * 100 / data.shape[0]).sort_values(ascending=False)
    value = pd.concat([missing_value, missing_perc], axis=1, keys=['Count', '%'])
    return value
miss(gender) | code |
72101516/cell_2 | [
"text_html_output_1.png"
!pip install openpyxl # pandas.read_excel needs the openpyxl engine for .xlsx files on Kaggle | code |
72101516/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
medal = pd.read_excel('../input/2021-olympics-in-tokyo/Medals.xlsx', index_col=0)
athlete = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx', index_col=0)
gender = pd.read_excel('../input/2021-olympics-in-tokyo/EntriesGender.xlsx', index_col=0)
team = pd.read_excel('../input/2021-olympics-in-tokyo/Teams.xlsx', index_col=0)
coach = pd.read_excel('../input/2021-olympics-in-tokyo/Coaches.xlsx', index_col=0)
def miss(data):
missing_value = data.isnull().sum().sort_values(ascending=False)
missing_perc = (data.isnull().sum() * 100 / data.shape[0]).sort_values(ascending=False)
    value = pd.concat([missing_value, missing_perc], axis=1, keys=['Count', '%'])
    return value
miss(coach) | code |
72101516/cell_1 | [
"text_plain_output_1.png"
] | import nltk
import os
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
import seaborn as sns
sns.set()
from sklearn.cluster import KMeans
from mpl_toolkits.mplot3d import Axes3D
import tensorflow as tf
import tensorflow_datasets as tfds
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
from collections import Counter
import nltk
nltk.download('stopwords')
import re
from collections import defaultdict
from sklearn.preprocessing import LabelEncoder, StandardScaler, OneHotEncoder, OrdinalEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline
from xgboost import XGBRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from scipy import stats
from scipy.stats import norm, skew
from scipy.special import boxcox1p
from sklearn.preprocessing import RobustScaler
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
72101516/cell_7 | [
"image_output_1.png"
] | import pandas as pd
medal = pd.read_excel('../input/2021-olympics-in-tokyo/Medals.xlsx', index_col=0)
athlete = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx', index_col=0)
gender = pd.read_excel('../input/2021-olympics-in-tokyo/EntriesGender.xlsx', index_col=0)
team = pd.read_excel('../input/2021-olympics-in-tokyo/Teams.xlsx', index_col=0)
coach = pd.read_excel('../input/2021-olympics-in-tokyo/Coaches.xlsx', index_col=0)
def miss(data):
missing_value = data.isnull().sum().sort_values(ascending=False)
missing_perc = (data.isnull().sum() * 100 / data.shape[0]).sort_values(ascending=False)
    value = pd.concat([missing_value, missing_perc], axis=1, keys=['Count', '%'])
    return value
miss(medal) | code |
72101516/cell_8 | [
"text_html_output_2.png"
] | import pandas as pd
medal = pd.read_excel('../input/2021-olympics-in-tokyo/Medals.xlsx', index_col=0)
athlete = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx', index_col=0)
gender = pd.read_excel('../input/2021-olympics-in-tokyo/EntriesGender.xlsx', index_col=0)
team = pd.read_excel('../input/2021-olympics-in-tokyo/Teams.xlsx', index_col=0)
coach = pd.read_excel('../input/2021-olympics-in-tokyo/Coaches.xlsx', index_col=0)
def miss(data):
missing_value = data.isnull().sum().sort_values(ascending=False)
missing_perc = (data.isnull().sum() * 100 / data.shape[0]).sort_values(ascending=False)
    value = pd.concat([missing_value, missing_perc], axis=1, keys=['Count', '%'])
    return value
miss(athlete) | code |
72101516/cell_16 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import nltk
import os
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
import seaborn as sns
sns.set()
from sklearn.cluster import KMeans
from mpl_toolkits.mplot3d import Axes3D
import tensorflow as tf
import tensorflow_datasets as tfds
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
from collections import Counter
import nltk
nltk.download('stopwords')
import re
from collections import defaultdict
from sklearn.preprocessing import LabelEncoder, StandardScaler, OneHotEncoder, OrdinalEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline
from xgboost import XGBRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from scipy import stats
from scipy.stats import norm, skew
from scipy.special import boxcox1p
from sklearn.preprocessing import RobustScaler
import os
medal = pd.read_excel('../input/2021-olympics-in-tokyo/Medals.xlsx', index_col=0)
athlete = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx', index_col=0)
gender = pd.read_excel('../input/2021-olympics-in-tokyo/EntriesGender.xlsx', index_col=0)
team = pd.read_excel('../input/2021-olympics-in-tokyo/Teams.xlsx', index_col=0)
coach = pd.read_excel('../input/2021-olympics-in-tokyo/Coaches.xlsx', index_col=0)
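# Two top-10 frequency charts follow: athletes per NOC, then per discipline.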
count1 = athlete['NOC'].value_counts().head(10)
sns.barplot(x=count1.index, y=count1.values)
plt.xticks(rotation=90)
plt.show()
count = athlete['Discipline'].value_counts().head(10)
sns.barplot(x=count.index, y=count.values)
plt.xticks(rotation=90)
plt.show() | code |