Dataset schema (one record per notebook cell):

    path              string    lengths 13 to 17
    screenshot_names  sequence  lengths 1 to 873
    code              string    lengths 0 to 40.4k
    cell_type         string    1 distinct value ("code")
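
Each record that follows carries these four fields in that order. As a minimal sketch of how such records could be loaded and queried with pandas (assuming, purely for illustration, that they have been exported to a JSON Lines file named cells.jsonl; the actual storage format is not given here):

    import pandas as pd

    # Hypothetical export path; each line holds one cell record with the
    # fields path, screenshot_names, code and cell_type.
    records = pd.read_json('cells.jsonl', lines=True)

    # The kernel id is the part of 'path' before the slash.
    records['kernel_id'] = records['path'].str.split('/').str[0]
    print(records['kernel_id'].value_counts().head())

    # Cells whose captured outputs include a rendered image.
    has_image = records['screenshot_names'].apply(
        lambda names: any(str(n).startswith('image_output') for n in names))
    print(records.loc[has_image, 'path'].head())

The raw records themselves follow.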
105190994/cell_15
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd import warnings import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import pandas_profiling as pp import seaborn as sns import warnings import os import warnings warnings.filterwarnings(action='ignore') CSV_PATH = '../input/iris-files/iris.csv' iris = pd.read_csv(CSV_PATH) nrow = 5 n_feateures = 4 dates = pd.date_range('20220101', periods=nrow, freq='s') df = pd.DataFrame(np.random.randn(nrow, n_feateures), index=dates, columns=list('ABCD')) df (df.head(3), df.tail(2), df.columns, df.index, df.shape)
code
105190994/cell_17
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np import pandas as pd import warnings import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import pandas_profiling as pp import seaborn as sns import warnings import os import warnings warnings.filterwarnings(action='ignore') CSV_PATH = '../input/iris-files/iris.csv' iris = pd.read_csv(CSV_PATH) nrow = 5 n_feateures = 4 dates = pd.date_range('20220101', periods=nrow, freq='s') df = pd.DataFrame(np.random.randn(nrow, n_feateures), index=dates, columns=list('ABCD')) df (df.head(3), df.tail(2), df.columns, df.index, df.shape) df.info()
code
105190994/cell_24
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd import warnings import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import pandas_profiling as pp import seaborn as sns import warnings import os import warnings warnings.filterwarnings(action='ignore') CSV_PATH = '../input/iris-files/iris.csv' iris = pd.read_csv(CSV_PATH) nrow = 5 n_feateures = 4 dates = pd.date_range('20220101', periods=nrow, freq='s') df = pd.DataFrame(np.random.randn(nrow, n_feateures), index=dates, columns=list('ABCD')) df (df.head(3), df.tail(2), df.columns, df.index, df.shape) df.loc[:, 'A'] df.iloc[0:2, 3:] df[df.A < 0]
code
105190994/cell_22
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd import warnings import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import pandas_profiling as pp import seaborn as sns import warnings import os import warnings warnings.filterwarnings(action='ignore') CSV_PATH = '../input/iris-files/iris.csv' iris = pd.read_csv(CSV_PATH) nrow = 5 n_feateures = 4 dates = pd.date_range('20220101', periods=nrow, freq='s') df = pd.DataFrame(np.random.randn(nrow, n_feateures), index=dates, columns=list('ABCD')) df (df.head(3), df.tail(2), df.columns, df.index, df.shape) df.loc[:, 'A']
code
105190994/cell_27
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd import warnings import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import pandas_profiling as pp import seaborn as sns import warnings import os import warnings warnings.filterwarnings(action='ignore') CSV_PATH = '../input/iris-files/iris.csv' iris = pd.read_csv(CSV_PATH) nrow = 5 n_feateures = 4 dates = pd.date_range('20220101', periods=nrow, freq='s') df = pd.DataFrame(np.random.randn(nrow, n_feateures), index=dates, columns=list('ABCD')) df t = np.arange(5) sin_t = np.sin(t) cos_t = np.cos(t) exp_t = np.exp(t) df2 = pd.DataFrame({'t': t, 'sin': sin_t, 'cos': cos_t, 'exp': exp_t}) df2 df1 = pd.DataFrame(np.random.rand(2, 4)) df2 = pd.DataFrame(np.random.rand(1, 4)) df3 = pd.DataFrame(np.random.rand(3, 5)) df_list = [df1, df2, df3] df4 = pd.concat(df_list, axis=0) df4
code
105190994/cell_12
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd import warnings import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import pandas_profiling as pp import seaborn as sns import warnings import os import warnings warnings.filterwarnings(action='ignore') CSV_PATH = '../input/iris-files/iris.csv' iris = pd.read_csv(CSV_PATH) nrow = 5 n_feateures = 4 dates = pd.date_range('20220101', periods=nrow, freq='s') df = pd.DataFrame(np.random.randn(nrow, n_feateures), index=dates, columns=list('ABCD')) df
code
105190994/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd import warnings import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import pandas_profiling as pp import seaborn as sns import warnings import os import warnings warnings.filterwarnings(action='ignore') CSV_PATH = '../input/iris-files/iris.csv' iris = pd.read_csv(CSV_PATH) iris.head()
code
106201680/cell_21
[ "image_output_1.png" ]
import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] test_ratio = 0.2 test_set_size = int(len(df) * test_ratio) df_train = df[0:-test_set_size].copy() df_test = df[-test_set_size:].copy() df_test.rename(columns={'#Passengers': 'TEST SET'}).join(df_train.rename(columns={'#Passengers': 'TRAINING SET'}), how='outer').plot(figsize=(15, 5), title='Penumpang Pesawat', style='-') df_train.reset_index(inplace=True) df_train.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_train
code
106201680/cell_9
[ "image_output_1.png" ]
import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df.info()
code
106201680/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df
code
106201680/cell_34
[ "text_plain_output_1.png" ]
from prophet import Prophet import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] test_ratio = 0.2 test_set_size = int(len(df) * test_ratio) df_train = df[0:-test_set_size].copy() df_test = df[-test_set_size:].copy() df_test.rename(columns={'#Passengers': 'TEST SET'}).join(df_train.rename(columns={'#Passengers': 'TRAINING SET'}), how='outer').plot(figsize=(15, 5), title='Penumpang Pesawat', style='-') df_train.reset_index(inplace=True) df_train.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test.reset_index(inplace=True) df_test.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test model = Prophet() model.fit(df_train) hasil_prediksi = model.predict(df_test) # Visualisai hasil prediksi machine learning f, ax = plt.subplots(1) f.set_figheight(5) f.set_figwidth(15) fig = model.plot(hasil_prediksi, ax=ax) plt.show() f, ax = plt.subplots(1) f.set_figheight(5) f.set_figwidth(15) ax.plot(df_test['ds'], df_test['y'], color='r') fig = model.plot(hasil_prediksi, ax=ax)
code
106201680/cell_30
[ "text_html_output_1.png" ]
from prophet import Prophet import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] test_ratio = 0.2 test_set_size = int(len(df) * test_ratio) df_train = df[0:-test_set_size].copy() df_test = df[-test_set_size:].copy() df_test.rename(columns={'#Passengers': 'TEST SET'}).join(df_train.rename(columns={'#Passengers': 'TRAINING SET'}), how='outer').plot(figsize=(15, 5), title='Penumpang Pesawat', style='-') df_train.reset_index(inplace=True) df_train.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test.reset_index(inplace=True) df_test.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test model = Prophet() model.fit(df_train) hasil_prediksi = model.predict(df_test) hasil_prediksi['yhat']
code
106201680/cell_33
[ "text_html_output_1.png" ]
from prophet import Prophet import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] test_ratio = 0.2 test_set_size = int(len(df) * test_ratio) df_train = df[0:-test_set_size].copy() df_test = df[-test_set_size:].copy() df_test.rename(columns={'#Passengers': 'TEST SET'}).join(df_train.rename(columns={'#Passengers': 'TRAINING SET'}), how='outer').plot(figsize=(15, 5), title='Penumpang Pesawat', style='-') df_train.reset_index(inplace=True) df_train.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test.reset_index(inplace=True) df_test.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test model = Prophet() model.fit(df_train) hasil_prediksi = model.predict(df_test) f, ax = plt.subplots(1) f.set_figheight(5) f.set_figwidth(15) fig = model.plot(hasil_prediksi, ax=ax) plt.show()
code
106201680/cell_6
[ "text_html_output_1.png" ]
import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df[df['Month'].duplicated()]
code
106201680/cell_40
[ "image_output_1.png" ]
from prophet import Prophet import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df['Month'] = pd.to_datetime(df['Month'], format='%Y-%m') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] test_ratio = 0.2 test_set_size = int(len(df) * test_ratio) df_train = df[0:-test_set_size].copy() df_test = df[-test_set_size:].copy() df_test.rename(columns={'#Passengers': 'TEST SET'}).join(df_train.rename(columns={'#Passengers': 'TRAINING SET'}), how='outer').plot(figsize=(15, 5), title='Penumpang Pesawat', style='-') df_train.reset_index(inplace=True) df_train.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test.reset_index(inplace=True) df_test.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test model = Prophet() model.fit(df_train) hasil_prediksi = model.predict(df_test) # Visualisai hasil prediksi machine learning f, ax = plt.subplots(1) f.set_figheight(5) f.set_figwidth(15) fig = model.plot(hasil_prediksi, ax=ax) plt.show() # Visualisasi perbandingan antara hasil prediksi machine learning dengan data yang asli f, ax = plt.subplots(1) f.set_figheight(5) f.set_figwidth(15) ax.plot(df_test['ds'], df_test['y'], color='r') fig = model.plot(hasil_prediksi, ax=ax) prediksi_tahun_berikutnya = pd.DataFrame(columns=['ds', 'y']) hasil_prediksi_satu_tahun_kedepan = model.predict(prediksi_tahun_berikutnya) f, ax = plt.subplots(1) f.set_figheight(5) f.set_figwidth(15) ax.plot(df_test['ds'], df_test['y'], color='r') fig = model.plot(hasil_prediksi_satu_tahun_kedepan, ax=ax) fig = plt.suptitle('Prediksi Penumpang Satu Tahun Kedepan') plt.show()
code
106201680/cell_29
[ "text_html_output_1.png" ]
import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] test_ratio = 0.2 test_set_size = int(len(df) * test_ratio) df_train = df[0:-test_set_size].copy() df_test = df[-test_set_size:].copy() df_test.rename(columns={'#Passengers': 'TEST SET'}).join(df_train.rename(columns={'#Passengers': 'TRAINING SET'}), how='outer').plot(figsize=(15, 5), title='Penumpang Pesawat', style='-') df_test.reset_index(inplace=True) df_test.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test df_test['y']
code
106201680/cell_39
[ "text_plain_output_1.png" ]
from prophet import Prophet import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df['Month'] = pd.to_datetime(df['Month'], format='%Y-%m') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] test_ratio = 0.2 test_set_size = int(len(df) * test_ratio) df_train = df[0:-test_set_size].copy() df_test = df[-test_set_size:].copy() df_test.rename(columns={'#Passengers': 'TEST SET'}).join(df_train.rename(columns={'#Passengers': 'TRAINING SET'}), how='outer').plot(figsize=(15, 5), title='Penumpang Pesawat', style='-') df_train.reset_index(inplace=True) df_train.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test.reset_index(inplace=True) df_test.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test model = Prophet() model.fit(df_train) hasil_prediksi = model.predict(df_test) # Visualisai hasil prediksi machine learning f, ax = plt.subplots(1) f.set_figheight(5) f.set_figwidth(15) fig = model.plot(hasil_prediksi, ax=ax) plt.show() # Visualisasi perbandingan antara hasil prediksi machine learning dengan data yang asli f, ax = plt.subplots(1) f.set_figheight(5) f.set_figwidth(15) ax.plot(df_test['ds'], df_test['y'], color='r') fig = model.plot(hasil_prediksi, ax=ax) prediksi_tahun_berikutnya = pd.DataFrame(columns=['ds', 'y']) hasil_prediksi_satu_tahun_kedepan = model.predict(prediksi_tahun_berikutnya) hasil_prediksi_satu_tahun_kedepan
code
106201680/cell_26
[ "text_html_output_1.png" ]
from prophet import Prophet import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] test_ratio = 0.2 test_set_size = int(len(df) * test_ratio) df_train = df[0:-test_set_size].copy() df_test = df[-test_set_size:].copy() df_test.rename(columns={'#Passengers': 'TEST SET'}).join(df_train.rename(columns={'#Passengers': 'TRAINING SET'}), how='outer').plot(figsize=(15, 5), title='Penumpang Pesawat', style='-') df_train.reset_index(inplace=True) df_train.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test.reset_index(inplace=True) df_test.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test model = Prophet() model.fit(df_train) hasil_prediksi = model.predict(df_test) hasil_prediksi
code
106201680/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df = df.set_index(['Month']) df
code
106201680/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] test_ratio = 0.2 test_set_size = int(len(df) * test_ratio) df_train = df[0:-test_set_size].copy() df_test = df[-test_set_size:].copy() df_test.rename(columns={'#Passengers': 'TEST SET'}).join(df_train.rename(columns={'#Passengers': 'TRAINING SET'}), how='outer').plot(figsize=(15, 5), title='Penumpang Pesawat', style='-') df_train.reset_index(inplace=True) df_train
code
106201680/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
106201680/cell_32
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from prophet import Prophet from sklearn.metrics import mean_absolute_error import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] test_ratio = 0.2 test_set_size = int(len(df) * test_ratio) df_train = df[0:-test_set_size].copy() df_test = df[-test_set_size:].copy() df_test.rename(columns={'#Passengers': 'TEST SET'}).join(df_train.rename(columns={'#Passengers': 'TRAINING SET'}), how='outer').plot(figsize=(15, 5), title='Penumpang Pesawat', style='-') df_train.reset_index(inplace=True) df_train.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test.reset_index(inplace=True) df_test.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test model = Prophet() model.fit(df_train) hasil_prediksi = model.predict(df_test) mean_absolute_error(df_test['y'], hasil_prediksi['yhat'])
code
106201680/cell_8
[ "text_html_output_1.png" ]
import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df[df['Month'].duplicated()]
code
106201680/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] test_ratio = 0.2 test_set_size = int(len(df) * test_ratio) df_train = df[0:-test_set_size].copy() df_test = df[-test_set_size:].copy() df_test.rename(columns={'#Passengers': 'TEST SET'}).join(df_train.rename(columns={'#Passengers': 'TRAINING SET'}), how='outer').plot(figsize=(15, 5), title='Penumpang Pesawat', style='-') plt.show()
code
106201680/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] test_ratio = 0.2 test_set_size = int(len(df) * test_ratio) df_train = df[0:-test_set_size].copy() df_test = df[-test_set_size:].copy() df_test.rename(columns={'#Passengers': 'TEST SET'}).join(df_train.rename(columns={'#Passengers': 'TRAINING SET'}), how='outer').plot(figsize=(15, 5), title='Penumpang Pesawat', style='-') df_train
code
106201680/cell_31
[ "text_html_output_1.png" ]
from prophet import Prophet from sklearn.metrics import mean_squared_error import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] test_ratio = 0.2 test_set_size = int(len(df) * test_ratio) df_train = df[0:-test_set_size].copy() df_test = df[-test_set_size:].copy() df_test.rename(columns={'#Passengers': 'TEST SET'}).join(df_train.rename(columns={'#Passengers': 'TRAINING SET'}), how='outer').plot(figsize=(15, 5), title='Penumpang Pesawat', style='-') df_train.reset_index(inplace=True) df_train.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test.reset_index(inplace=True) df_test.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test model = Prophet() model.fit(df_train) hasil_prediksi = model.predict(df_test) mean_squared_error(df_test['y'], hasil_prediksi['yhat'])
code
106201680/cell_24
[ "image_output_1.png" ]
from prophet import Prophet import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] test_ratio = 0.2 test_set_size = int(len(df) * test_ratio) df_train = df[0:-test_set_size].copy() df_test = df[-test_set_size:].copy() df_test.rename(columns={'#Passengers': 'TEST SET'}).join(df_train.rename(columns={'#Passengers': 'TRAINING SET'}), how='outer').plot(figsize=(15, 5), title='Penumpang Pesawat', style='-') df_train.reset_index(inplace=True) df_train.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) model = Prophet() model.fit(df_train)
code
106201680/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] test_ratio = 0.2 test_set_size = int(len(df) * test_ratio) df_train = df[0:-test_set_size].copy() df_test = df[-test_set_size:].copy() test_set_size
code
106201680/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] test_ratio = 0.2 test_set_size = int(len(df) * test_ratio) df_train = df[0:-test_set_size].copy() df_test = df[-test_set_size:].copy() df_test.rename(columns={'#Passengers': 'TEST SET'}).join(df_train.rename(columns={'#Passengers': 'TRAINING SET'}), how='outer').plot(figsize=(15, 5), title='Penumpang Pesawat', style='-') df_test.reset_index(inplace=True) df_test.rename(columns={'Month': 'ds', '#Passengers': 'y'}, inplace=True) df_test
code
106201680/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df
code
106201680/cell_37
[ "text_plain_output_1.png" ]
import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df['Month'] = pd.to_datetime(df['Month'], format='%Y-%m') prediksi_tahun_berikutnya = pd.DataFrame(columns=['ds', 'y']) prediksi_tahun_berikutnya
code
106201680/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pylab as plt # Untuk visualisasi import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df = df.set_index(['Month']) df color_pal = ['#F8766D', '#D39200', '#93AA00', '#00BA38', '#00C19F', '#00B9E3', '#619CFF', '#DB72FB'] df.plot(style='-', figsize=(15, 5), color=color_pal[0], title='Penumpang Pesawat') plt.show()
code
106201680/cell_5
[ "image_output_1.png" ]
import pandas as pd # Untuk mengolah data import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/airpassengers/AirPassengers.csv') df.info()
code
332165/cell_1
[ "text_plain_output_1.png" ]
library(ggplot2) library(readr) system('ls ../input')
code
332165/cell_3
[ "text_html_output_1.png" ]
data = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', encoding='utf-8', low_memory=False) data.groupby(['SchoolDegree', 'CountryLive'])['Income'].mean()
code
128010580/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd df = pd.read_csv('austin_weather.csv') df
code
17118469/cell_4
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd auto = pd.read_csv('../input/cnt_km_year_powerPS_minPrice_maxPrice_avgPrice_sdPrice.csv') p = auto.hist(figsize = (20,20)) plt.matshow(auto.corr()) plt.colorbar() plt.show()
code
17118469/cell_6
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns auto = pd.read_csv('../input/cnt_km_year_powerPS_minPrice_maxPrice_avgPrice_sdPrice.csv') p = auto.hist(figsize = (20,20)) plt.colorbar() plt.figure(figsize=(10, 7)) sns.scatterplot(x='year', y='avgPrice', data=auto) plt.show()
code
17118469/cell_2
[ "text_html_output_1.png" ]
import pandas as pd auto = pd.read_csv('../input/cnt_km_year_powerPS_minPrice_maxPrice_avgPrice_sdPrice.csv') auto.head()
code
17118469/cell_1
[ "text_plain_output_1.png" ]
import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from matplotlib import cm sns.set_style('ticks') import plotly.offline as py import matplotlib.ticker as mtick py.init_notebook_mode(connected=True) import plotly.graph_objs as go import plotly.tools as tls plt.xkcd()
code
17118469/cell_3
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd auto = pd.read_csv('../input/cnt_km_year_powerPS_minPrice_maxPrice_avgPrice_sdPrice.csv') p = auto.hist(figsize=(20, 20))
code
17118469/cell_5
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns auto = pd.read_csv('../input/cnt_km_year_powerPS_minPrice_maxPrice_avgPrice_sdPrice.csv') p = auto.hist(figsize = (20,20)) plt.colorbar() plt.figure(figsize=(10, 7)) sns.scatterplot(x='year', y='maxPrice', data=auto) plt.show()
code
90111888/cell_13
[ "text_html_output_1.png" ]
train_identity.info()
code
90111888/cell_9
[ "text_plain_output_1.png" ]
train_transactions = pd.read_csv('../input/train_transaction.csv') train_identity = pd.read_csv('../input/train_identity.csv') print('Train data set is loaded !')
code
90111888/cell_34
[ "text_plain_output_1.png" ]
train_df = train_transactions.merge(train_identity, how='left', on='TransactionID') del train_transactions, train_identity test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-'))) train_df.set_index('TransactionID', inplace=True) test_df.set_index('TransactionID', inplace=True) train_df.to_pickle('train_df.pkl') test_df.to_pickle('test_df.pkl') train_df['id_38'].value_counts()
code
90111888/cell_33
[ "text_plain_output_1.png" ]
train_df = train_transactions.merge(train_identity, how='left', on='TransactionID') del train_transactions, train_identity test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-'))) train_df.set_index('TransactionID', inplace=True) test_df.set_index('TransactionID', inplace=True) train_df.to_pickle('train_df.pkl') test_df.to_pickle('test_df.pkl') train_df['ProductCD'].value_counts()
code
90111888/cell_20
[ "text_plain_output_1.png" ]
train_df = train_transactions.merge(train_identity, how='left', on='TransactionID') del train_transactions, train_identity train_df['R_emaildomain'].value_counts()
code
90111888/cell_40
[ "text_plain_output_1.png" ]
train_df = train_transactions.merge(train_identity, how='left', on='TransactionID') del train_transactions, train_identity test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-'))) train_df.set_index('TransactionID', inplace=True) test_df.set_index('TransactionID', inplace=True) train_df.to_pickle('train_df.pkl') test_df.to_pickle('test_df.pkl') object_cols = [col for col in train_df.columns if train_df[col].dtype == 'object'] good_label_cols = [col for col in object_cols if set(train_df[col]) == set(test_df[col])] bad_label_cols = list(set(object_cols) - set(good_label_cols)) train_df.drop(bad_label_cols, axis=1, inplace=True) test_df.drop(bad_label_cols, axis=1, inplace=True) low_cardinality_cols = [col for col in good_label_cols if train_df[col].nunique() < 10] high_cardinality_cols = list(set(good_label_cols) - set(low_cardinality_cols)) train_df[low_cardinality_cols].head()
code
90111888/cell_29
[ "text_plain_output_1.png" ]
train_df = pd.read_pickle('train_df.pkl') test_df = pd.read_pickle('test_df.pkl')
code
90111888/cell_39
[ "text_plain_output_1.png" ]
for f in high_cardinality_cols: lbl_enc = LabelEncoder() lbl_enc.fit(list(train_df[f].values)) print(f'{f}: {lbl_enc.classes_}') train_df[f] = lbl_enc.transform(list(train_df[f].values)) test_df[f] = lbl_enc.transform(list(test_df[f].values))
code
90111888/cell_26
[ "text_plain_output_1.png" ]
test_df = reduce_mem_usage(test_df)
code
90111888/cell_48
[ "image_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import make_scorer, roc_auc_score from sklearn.metrics import roc_auc_score, roc_curve clf_rf_down = RandomForestClassifier(random_state=42, n_estimators=50) model_rf_down = clf_rf_down.fit(X_train_sm, y_train_sm) y_prob = model_rf_down.predict_proba(X_test)[:, 1] print(f'ROC-AUC score: {roc_auc_score(y_test, y_prob):.3f}')
code
90111888/cell_11
[ "text_html_output_1.png" ]
train_transactions.info()
code
90111888/cell_19
[ "text_plain_output_1.png" ]
train_df = reduce_mem_usage(train_df)
code
90111888/cell_52
[ "text_plain_output_1.png" ]
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True, needs_threshold=True) cross_validation = StratifiedKFold(n_splits=3, shuffle=True, random_state=42) clf = xgb.XGBClassifier(nthread=1, random_state=42) param_grid = {'max_depth': [15, 20], 'min_samples_split': [3], 'learning_rate': [0.05, 0.1], 'min_child_weight': [5, 11, 15], 'silent': [1], 'subsample': [0.8], 'colsample_bytree': [0.7], 'n_estimators': [500], 'missing': [-999]} search = GridSearchCV(clf, param_grid, cv=cross_validation, scoring=roc_auc_scorer, n_jobs=-1).fit(X_train_sm, y_train_sm) print(search.best_params_) y_prob = search.predict_proba(X_test)[:, 1] print(f'ROC-AUC score: {roc_auc_score(y_test, y_prob):.3f}') predictions_cv_xgb = search.predict_proba(OH_test_df)[:, 1] submission = pd.DataFrame({'TransactionID': OH_test_df.index, 'isFraud': predictions_cv_xgb}) submission['TransactionID'] = submission['TransactionID'].astype(int) filename = 'cv_xgb_model_submission.csv' submission.to_csv(filename, index=False) print(f'Saved file: {filename}')
code
90111888/cell_45
[ "text_html_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder from sklearn.utils import resample import pandas as pd import seaborn as sns train_df = train_transactions.merge(train_identity, how='left', on='TransactionID') del train_transactions, train_identity test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-'))) train_df.set_index('TransactionID', inplace=True) test_df.set_index('TransactionID', inplace=True) train_df.to_pickle('train_df.pkl') test_df.to_pickle('test_df.pkl') object_cols = [col for col in train_df.columns if train_df[col].dtype == 'object'] good_label_cols = [col for col in object_cols if set(train_df[col]) == set(test_df[col])] bad_label_cols = list(set(object_cols) - set(good_label_cols)) train_df.drop(bad_label_cols, axis=1, inplace=True) test_df.drop(bad_label_cols, axis=1, inplace=True) low_cardinality_cols = [col for col in good_label_cols if train_df[col].nunique() < 10] high_cardinality_cols = list(set(good_label_cols) - set(low_cardinality_cols)) from sklearn.preprocessing import OneHotEncoder OH_encoder = OneHotEncoder(drop='first', sparse=False) OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(train_df[low_cardinality_cols].astype('str'))) OH_cols_valid = pd.DataFrame(OH_encoder.transform(test_df[low_cardinality_cols].astype('str'))) OH_cols_train.index = train_df.index OH_cols_valid.index = test_df.index num_X_train = train_df.drop(low_cardinality_cols, axis=1) num_X_valid = test_df.drop(low_cardinality_cols, axis=1) del train_df, test_df OH_train_df = pd.concat([num_X_train, OH_cols_train], axis=1) OH_test_df = pd.concat([num_X_valid, OH_cols_valid], axis=1) df_majority_downsampled, y_majority_downsampled = resample(X_train[y_train == 0], y_train[y_train == 0], replace=False, n_samples=3 * len(y_train[y_train == 1]), random_state=42) X_down_train = pd.concat([X_train[y_train == 1], df_majority_downsampled]) y_down_train = pd.concat([y_train[y_train == 1], y_majority_downsampled]) sns.countplot(x=y_down_train)
code
90111888/cell_32
[ "text_plain_output_1.png" ]
train_df = train_df.fillna(-999) test_df = test_df.fillna(-999)
code
90111888/cell_38
[ "text_plain_output_1.png" ]
train_df = train_transactions.merge(train_identity, how='left', on='TransactionID') del train_transactions, train_identity test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-'))) train_df.set_index('TransactionID', inplace=True) test_df.set_index('TransactionID', inplace=True) train_df.to_pickle('train_df.pkl') test_df.to_pickle('test_df.pkl') object_cols = [col for col in train_df.columns if train_df[col].dtype == 'object'] good_label_cols = [col for col in object_cols if set(train_df[col]) == set(test_df[col])] bad_label_cols = list(set(object_cols) - set(good_label_cols)) train_df.drop(bad_label_cols, axis=1, inplace=True) test_df.drop(bad_label_cols, axis=1, inplace=True) low_cardinality_cols = [col for col in good_label_cols if train_df[col].nunique() < 10] high_cardinality_cols = list(set(good_label_cols) - set(low_cardinality_cols)) print('Categorical columns that will be one-hot encoded:', low_cardinality_cols) print('\nCategorical columns that will be label encoded:', high_cardinality_cols)
code
90111888/cell_17
[ "image_output_1.png" ]
train_df = train_transactions.merge(train_identity, how='left', on='TransactionID') print('Train shape', train_df.shape) print('Data set merged ') del train_transactions, train_identity
code
90111888/cell_35
[ "text_plain_output_1.png" ]
train_df = train_transactions.merge(train_identity, how='left', on='TransactionID') del train_transactions, train_identity test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-'))) train_df.set_index('TransactionID', inplace=True) test_df.set_index('TransactionID', inplace=True) train_df.to_pickle('train_df.pkl') test_df.to_pickle('test_df.pkl') object_cols = [col for col in train_df.columns if train_df[col].dtype == 'object'] good_label_cols = [col for col in object_cols if set(train_df[col]) == set(test_df[col])] bad_label_cols = list(set(object_cols) - set(good_label_cols)) print('Categorical columns that will be label encoded:', good_label_cols) print('\nCategorical columns that will be dropped from the dataset:', bad_label_cols)
code
90111888/cell_46
[ "image_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder from sklearn.utils import resample import pandas as pd import seaborn as sns train_df = train_transactions.merge(train_identity, how='left', on='TransactionID') del train_transactions, train_identity test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-'))) train_df.set_index('TransactionID', inplace=True) test_df.set_index('TransactionID', inplace=True) train_df.to_pickle('train_df.pkl') test_df.to_pickle('test_df.pkl') object_cols = [col for col in train_df.columns if train_df[col].dtype == 'object'] good_label_cols = [col for col in object_cols if set(train_df[col]) == set(test_df[col])] bad_label_cols = list(set(object_cols) - set(good_label_cols)) train_df.drop(bad_label_cols, axis=1, inplace=True) test_df.drop(bad_label_cols, axis=1, inplace=True) low_cardinality_cols = [col for col in good_label_cols if train_df[col].nunique() < 10] high_cardinality_cols = list(set(good_label_cols) - set(low_cardinality_cols)) from sklearn.preprocessing import OneHotEncoder OH_encoder = OneHotEncoder(drop='first', sparse=False) OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(train_df[low_cardinality_cols].astype('str'))) OH_cols_valid = pd.DataFrame(OH_encoder.transform(test_df[low_cardinality_cols].astype('str'))) OH_cols_train.index = train_df.index OH_cols_valid.index = test_df.index num_X_train = train_df.drop(low_cardinality_cols, axis=1) num_X_valid = test_df.drop(low_cardinality_cols, axis=1) del train_df, test_df OH_train_df = pd.concat([num_X_train, OH_cols_train], axis=1) OH_test_df = pd.concat([num_X_valid, OH_cols_valid], axis=1) df_majority_downsampled, y_majority_downsampled = resample(X_train[y_train == 0], y_train[y_train == 0], replace=False, n_samples=3 * len(y_train[y_train == 1]), random_state=42) X_down_train = pd.concat([X_train[y_train == 1], df_majority_downsampled]) y_down_train = pd.concat([y_train[y_train == 1], y_majority_downsampled]) X_train_sm, y_train_sm = (X_down_train, y_down_train) sns.countplot(x=y_train_sm)
code
90111888/cell_24
[ "text_plain_output_1.png" ]
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID') print('Train shape', train_df.shape) print('Data set merged ') del test_transaction, test_identity
code
90111888/cell_14
[ "text_plain_output_1.png" ]
import seaborn as sns sns.countplot(x=train_transactions['isFraud'])
code
90111888/cell_22
[ "text_plain_output_1.png" ]
test_transaction = pd.read_csv('../input/test_transaction.csv') test_identity = pd.read_csv('../input/test_identity.csv') sample_submission = pd.read_csv('../input/sample_submission.csv') print('Test data set is loaded !')
code
90111888/cell_10
[ "text_plain_output_1.png" ]
train_transactions.head()
code
90111888/cell_12
[ "text_plain_output_1.png" ]
train_identity.head()
code
90111888/cell_36
[ "text_plain_output_1.png" ]
train_df = train_transactions.merge(train_identity, how='left', on='TransactionID') del train_transactions, train_identity test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-'))) train_df.set_index('TransactionID', inplace=True) test_df.set_index('TransactionID', inplace=True) train_df.to_pickle('train_df.pkl') test_df.to_pickle('test_df.pkl') object_cols = [col for col in train_df.columns if train_df[col].dtype == 'object'] good_label_cols = [col for col in object_cols if set(train_df[col]) == set(test_df[col])] bad_label_cols = list(set(object_cols) - set(good_label_cols)) object_nunique = list(map(lambda col: train_df[col].nunique(), object_cols)) d = dict(zip(object_cols, object_nunique)) sorted(d.items(), key=lambda x: -x[1])
code
49124559/cell_13
[ "text_plain_output_1.png" ]
from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import train_test_split import numpy as np import numpy as np # linear algebra import os import os import pandas as pd import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import random import timm import torch import torch.nn as nn import numpy as np import pandas as pd import os import pandas as pd df = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv') class CFG: debug = True n_classes = 5 lr = 0.0001 batch_size = 8 epochs = 1 seed = 777 n_fold = 4 warmup = -1 device = 0 amp = True amp_inf = False smooth = False smooth_alpha = 0.1 efnet_num = 10 drop_rate = 0.25 crop = False psuedo_label = False pseudo_predict = '2020-11-06_14:50:43.385109_predict.csv' TTA = False Attention = False white = False model_name = 'tf_efficientnet_b0_ns' zoom = True SIZE = 512 model_names = ['tf_efficientnet_b0_ns', 'vit_base_resnet26d_224', 'resnest50d', 'vit_base_patch32_384'] def seed_torch(seed=42): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_torch(seed=42) torch.cuda.set_device(CFG.device) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') from sklearn.model_selection import train_test_split train, test = train_test_split(df, test_size=0.9, stratify=df['label'], random_state=2020) train = train.reset_index(drop=True) test = test.reset_index(drop=True) class efnet_model(nn.Module): def __init__(self): super().__init__() if CFG.efnet_num == 0: self.model = geffnet.efficientnet_b0(pretrained=True, drop_rate=CFG.drop_rate) elif CFG.efnet_num == 1: self.model = geffnet.efficientnet_b1(pretrained=True, drop_rate=CFG.drop_rate) elif CFG.efnet_num == 2: self.model = geffnet.efficientnet_b2(pretrained=True, drop_rate=CFG.drop_rate) elif CFG.efnet_num == 10: self.model = timm.create_model(CFG.model_name, pretrained=True, num_classes=CFG.n_classes) elif CFG.efnet_num < 3: self.model.classifier = nn.Linear(self.model.classifier.in_features, 2) def forward(self, x): if CFG.efnet_num == 10: x = self.model(x) else: x = self.model(x) return x if CFG.debug: folds = train.sample(n=200, random_state=CFG.seed).reset_index(drop=True).copy() else: folds = train.copy() train_labels = folds['label'].values kf = StratifiedKFold(n_splits=CFG.n_fold, shuffle=True, random_state=CFG.seed) for fold, (train_index, val_index) in enumerate(kf.split(folds.values, train_labels)): print('num_train,val', len(train_index), len(val_index), len(val_index) + len(train_index)) folds.loc[val_index, 'fold'] = int(fold)
code
49124559/cell_20
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from albumentations import Compose, Normalize, HorizontalFlip, VerticalFlip,RandomGamma, RandomRotate90,GaussNoise,Cutout from albumentations.pytorch import ToTensor from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import train_test_split from torch.optim import Adam, SGD from torch.utils.data import DataLoader, Dataset from tqdm import tqdm import cv2 import cv2 import datetime import numpy as np import numpy as np # linear algebra import os import os import pandas as pd import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import random import time import timm import torch import torch.nn as nn import numpy as np import pandas as pd import os import datetime dt_now = datetime.datetime.now() dt_now_ = str(dt_now).replace(' ', '_') import pandas as pd df = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv') class CFG: debug = True n_classes = 5 lr = 0.0001 batch_size = 8 epochs = 1 seed = 777 n_fold = 4 warmup = -1 device = 0 amp = True amp_inf = False smooth = False smooth_alpha = 0.1 efnet_num = 10 drop_rate = 0.25 crop = False psuedo_label = False pseudo_predict = '2020-11-06_14:50:43.385109_predict.csv' TTA = False Attention = False white = False model_name = 'tf_efficientnet_b0_ns' zoom = True SIZE = 512 model_names = ['tf_efficientnet_b0_ns', 'vit_base_resnet26d_224', 'resnest50d', 'vit_base_patch32_384'] def seed_torch(seed=42): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_torch(seed=42) torch.cuda.set_device(CFG.device) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') from sklearn.model_selection import train_test_split train, test = train_test_split(df, test_size=0.9, stratify=df['label'], random_state=2020) train = train.reset_index(drop=True) test = test.reset_index(drop=True) class efnet_model(nn.Module): def __init__(self): super().__init__() if CFG.efnet_num == 0: self.model = geffnet.efficientnet_b0(pretrained=True, drop_rate=CFG.drop_rate) elif CFG.efnet_num == 1: self.model = geffnet.efficientnet_b1(pretrained=True, drop_rate=CFG.drop_rate) elif CFG.efnet_num == 2: self.model = geffnet.efficientnet_b2(pretrained=True, drop_rate=CFG.drop_rate) elif CFG.efnet_num == 10: self.model = timm.create_model(CFG.model_name, pretrained=True, num_classes=CFG.n_classes) elif CFG.efnet_num < 3: self.model.classifier = nn.Linear(self.model.classifier.in_features, 2) def forward(self, x): if CFG.efnet_num == 10: x = self.model(x) else: x = self.model(x) return x if CFG.debug: folds = train.sample(n=200, random_state=CFG.seed).reset_index(drop=True).copy() else: folds = train.copy() train_labels = folds['label'].values kf = StratifiedKFold(n_splits=CFG.n_fold, shuffle=True, random_state=CFG.seed) for fold, (train_index, val_index) in enumerate(kf.split(folds.values, train_labels)): folds.loc[val_index, 'fold'] = int(fold) class TrainDataset(Dataset): def __init__(self, df, transform1=None, transform2=None): self.df = df self.transform = transform1 self.transform_ = transform2 def __len__(self): return len(self.df) def __getitem__(self, idx): path = self.df['image_id'].values[idx] file_path = '/kaggle/input/cassava-leaf-disease-classification/train_images/{}'.format(path) image = cv2.imread(file_path) if CFG.crop: image = crop_object(image) try: image = cv2.resize(image, (SIZE, SIZE)) except Exception as e: label_ = self.df['label'].values[idx] if self.transform: image = self.transform(image=image)['image'] if self.transform_: image = self.transform_(image=image)['image'] label = torch.tensor(label_) return (image, label) def get_transforms1(*, data): if data == 'train': return Compose([HorizontalFlip(p=0.5), VerticalFlip(p=0.5), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) elif data == 'valid': return Compose([Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) def to_tensor(*args): return Compose([ToTensor()]) def train_fn(fold): trn_idx = folds[folds['fold'] != fold].index val_idx = folds[folds['fold'] == fold].index train_df = folds.loc[trn_idx].reset_index(drop=True) valid_df = folds.loc[val_idx].reset_index(drop=True) train_dataset = TrainDataset(folds.loc[trn_idx].reset_index(drop=True), transform1=get_transforms1(data='train'), transform2=to_tensor()) valid_dataset = TrainDataset(folds.loc[val_idx].reset_index(drop=True), transform1=get_transforms1(data='valid'), transform2=to_tensor()) train_loader = DataLoader(train_dataset, batch_size=CFG.batch_size, shuffle=True, num_workers=4) valid_loader = DataLoader(valid_dataset, batch_size=CFG.batch_size, shuffle=False, num_workers=4) model = efnet_model() model.to(device) optimizer = Adam(model.parameters(), lr=CFG.lr, amsgrad=False) criterion = nn.CrossEntropyLoss() softmax = nn.Softmax(dim=1) for epoch in range(CFG.epochs): start_time = time.time() model.train() avg_loss = 0.0 tk0 = tqdm(enumerate(train_loader), total=len(train_loader)) for i, (images, labels) in tk0: images = images.to(device) labels = labels.to(device) optimizer.zero_grad() y_preds = model(images.float()) loss = criterion(y_preds, labels.long()) loss.backward() optimizer.step() avg_loss += loss.item() / len(train_loader) model.eval() avg_val_loss = 0.0 valid_labels = [] preds = [] tk1 = tqdm(enumerate(valid_loader), total=len(valid_loader)) for i, (images, labels) in tk1: images = images.to(device) labels = labels.to(device) with torch.no_grad(): y_preds = model(images.float()) loss = criterion(y_preds, labels.long()) valid_labels.append(labels.to('cpu').detach().numpy().copy()) y_preds = softmax(y_preds) preds.append(y_preds.to('cpu').detach().numpy().copy()) avg_val_loss += loss.item() / len(valid_loader) preds = np.concatenate(preds) valid_labels = np.concatenate(valid_labels) torch.save(model.state_dict(), f'fold{fold}_{dt_now_}_baseline.pth') return (preds, valid_labels) def auc(predict, labels): pass predict = [] labels = [] for fold in range(CFG.n_fold): _pred, _label = train_fn(fold) predict.append(_pred) labels.append(_label) predict = np.concatenate(predict) labels = np.concatenate(labels) score = auc(predict, labels) print(predict) print(labels) print(len(labels)) print(score)
code
49124559/cell_11
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split import pandas as pd import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd df = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv') from sklearn.model_selection import train_test_split train, test = train_test_split(df, test_size=0.9, stratify=df['label'], random_state=2020) train = train.reset_index(drop=True) test = test.reset_index(drop=True) print(train['label'].value_counts()) print(test['label'].value_counts())
code
49124559/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
49124559/cell_7
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd df = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv') df
code
49124559/cell_8
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_9.png", "application_vnd.jupyter.stderr_output_4.png", "application_vnd.jupyter.stderr_output_6.png", "application_vnd.jupyter.stderr_output_8.png", "text_plain_output_3.png", "text_plain_output_7.png", "text_plain_output_1.png" ]
path = '../input/cassava-leaf-disease-classification/train_images/100042118.jpg' import cv2 import matplotlib.pyplot as plt img = cv2.imread(path) im_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) print(img.shape) plt.imshow(im_gray, cmap='gray') plt.show() plt.imshow(im_gray, cmap='jet') plt.show()
code
49124559/cell_3
[ "text_plain_output_1.png" ]
!pip install ttach !pip install timm
code
49124559/cell_14
[ "text_html_output_1.png" ]
from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import train_test_split import numpy as np import numpy as np # linear algebra import os import os import pandas as pd import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import random import timm import torch import torch.nn as nn import numpy as np import pandas as pd import os import pandas as pd df = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv') class CFG: debug = True n_classes = 5 lr = 0.0001 batch_size = 8 epochs = 1 seed = 777 n_fold = 4 warmup = -1 device = 0 amp = True amp_inf = False smooth = False smooth_alpha = 0.1 efnet_num = 10 drop_rate = 0.25 crop = False psuedo_label = False pseudo_predict = '2020-11-06_14:50:43.385109_predict.csv' TTA = False Attention = False white = False model_name = 'tf_efficientnet_b0_ns' zoom = True SIZE = 512 model_names = ['tf_efficientnet_b0_ns', 'vit_base_resnet26d_224', 'resnest50d', 'vit_base_patch32_384'] def seed_torch(seed=42): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_torch(seed=42) torch.cuda.set_device(CFG.device) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') from sklearn.model_selection import train_test_split train, test = train_test_split(df, test_size=0.9, stratify=df['label'], random_state=2020) train = train.reset_index(drop=True) test = test.reset_index(drop=True) class efnet_model(nn.Module): def __init__(self): super().__init__() if CFG.efnet_num == 0: self.model = geffnet.efficientnet_b0(pretrained=True, drop_rate=CFG.drop_rate) elif CFG.efnet_num == 1: self.model = geffnet.efficientnet_b1(pretrained=True, drop_rate=CFG.drop_rate) elif CFG.efnet_num == 2: self.model = geffnet.efficientnet_b2(pretrained=True, drop_rate=CFG.drop_rate) elif CFG.efnet_num == 10: self.model = timm.create_model(CFG.model_name, pretrained=True, num_classes=CFG.n_classes) elif CFG.efnet_num < 3: self.model.classifier = nn.Linear(self.model.classifier.in_features, 2) def forward(self, x): if CFG.efnet_num == 10: x = self.model(x) else: x = self.model(x) return x if CFG.debug: folds = train.sample(n=200, random_state=CFG.seed).reset_index(drop=True).copy() else: folds = train.copy() train_labels = folds['label'].values kf = StratifiedKFold(n_splits=CFG.n_fold, shuffle=True, random_state=CFG.seed) for fold, (train_index, val_index) in enumerate(kf.split(folds.values, train_labels)): folds.loc[val_index, 'fold'] = int(fold) folds
code
49124559/cell_5
[ "text_plain_output_1.png" ]
import datetime import datetime dt_now = datetime.datetime.now() dt_now_ = str(dt_now).replace(' ', '_') print('実験開始', dt_now_)
code
34144202/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/global-wheat-detection/train.csv') df_train.shape df_train.shape[0] / len(df_train['image_id'].unique())
code
34144202/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/global-wheat-detection/train.csv') df_train.head()
code
34144202/cell_34
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/global-wheat-detection/train.csv') df_train.shape df_train.shape[0] / len(df_train['image_id'].unique()) df_train.dtypes cols_to_be_selected = ['image_id', 'x0', 'y0', 'w', 'h'] df1_train = df_train[cols_to_be_selected] val_percentage = 0.2 num_val_images = int(len(df1_train['image_id'].unique()) * val_percentage) num_train_images = len(df1_train['image_id'].unique()) - num_val_images list_val_imageid = list(df1_train['image_id'].unique())[-1 * num_val_images:] list_train_imageid = list(df1_train['image_id'].unique())[:num_train_images] print('Number of validation images: ', num_val_images) print('Number of training images: ', num_train_images) print(num_val_images + num_train_images)
code
34144202/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/global-wheat-detection/train.csv') df_train.shape df_train.shape[0] / len(df_train['image_id'].unique()) for col in df_train.columns: if sum(df_train[col].isnull()) == 1: print(col + ' has null values') else: print(col + ' no null values')
code
34144202/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/global-wheat-detection/train.csv') df_train.shape df_train.shape[0] / len(df_train['image_id'].unique()) df_train.head()
code
34144202/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/global-wheat-detection/train.csv') df_train.shape df_train.shape[0] / len(df_train['image_id'].unique()) df_train['width'].value_counts()
code
34144202/cell_7
[ "text_plain_output_1.png" ]
import os os.listdir('/kaggle/input/global-wheat-detection')
code
34144202/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/global-wheat-detection/train.csv') df_train.shape df_train.shape[0] / len(df_train['image_id'].unique()) df_train['height'].value_counts()
code
34144202/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/global-wheat-detection/train.csv') df_train.shape df_train.shape[0] / len(df_train['image_id'].unique()) df_train['image_id'].value_counts()
code
34144202/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/global-wheat-detection/train.csv') df_train.shape df_train.shape[0] / len(df_train['image_id'].unique()) df_train['source'].value_counts()
code
34144202/cell_43
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/global-wheat-detection/train.csv') df_train.shape df_train.shape[0] / len(df_train['image_id'].unique()) df_train.dtypes cols_to_be_selected = ['image_id', 'x0', 'y0', 'w', 'h'] df1_train = df_train[cols_to_be_selected] val_percentage = 0.2 num_val_images = int(len(df1_train['image_id'].unique()) * val_percentage) num_train_images = len(df1_train['image_id'].unique()) - num_val_images list_val_imageid = list(df1_train['image_id'].unique())[-1 * num_val_images:] list_train_imageid = list(df1_train['image_id'].unique())[:num_train_images] df2_val = df1_train.loc[df1_train['image_id'].isin(list_val_imageid), :] df2_train = df1_train.loc[df1_train['image_id'].isin(list_train_imageid), :] train_dir = '/kaggle/input/global-wheat-detection/train' test_obj = GlobalWheatDetectionDataset(df2_train, train_dir)
code
34144202/cell_31
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/global-wheat-detection/train.csv') df_train.shape df_train.shape[0] / len(df_train['image_id'].unique()) df_train.dtypes
code
34144202/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/global-wheat-detection/train.csv') df_train.shape df_train.shape[0] / len(df_train['image_id'].unique()) for col in df_train.columns: if sum(df_train[col].isnull()) == 1: print(col + ' has null values') else: print(col + ' no null values')
code
34144202/cell_22
[ "text_plain_output_1.png" ]
import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) os.listdir('/kaggle/input/global-wheat-detection') df_train = pd.read_csv('/kaggle/input/global-wheat-detection/train.csv') df_train.shape df_train.shape[0] / len(df_train['image_id'].unique()) list_image_ids_df = list(df_train['image_id'].unique()) list_image_ids_dir = os.listdir('/kaggle/input/global-wheat-detection/train') list_image_ids_df.sort() == list_image_ids_dir.sort()
code
34144202/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/global-wheat-detection/train.csv') df_train.shape
code
34144202/cell_12
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_train = pd.read_csv('/kaggle/input/global-wheat-detection/train.csv') df_train.shape len(df_train['image_id'].unique())
code
88086039/cell_34
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import seaborn as sns train = pd.read_csv(dirname + '/train.csv') test = pd.read_csv(dirname + '/test.csv') pid_test = test['PassengerId'] pct_missing = round(train.isnull().sum() / train.isnull().count() * 100, 1) pct_missing.sort_values(ascending=False).head() plt.figure(figsize=(16, 4)) sns.countplot(x='Survived', hue='Sex', data=train, palette='coolwarm')
code
88086039/cell_26
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import seaborn as sns train = pd.read_csv(dirname + '/train.csv') test = pd.read_csv(dirname + '/test.csv') pid_test = test['PassengerId'] pct_missing = round(train.isnull().sum() / train.isnull().count() * 100, 1) pct_missing.sort_values(ascending=False).head() plt.figure(figsize=(16, 6)) sns.heatmap(data=train.isnull(), yticklabels=False, cbar=False, cmap='viridis')
code
88086039/cell_41
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import seaborn as sns train = pd.read_csv(dirname + '/train.csv') test = pd.read_csv(dirname + '/test.csv') pid_test = test['PassengerId'] pct_missing = round(train.isnull().sum() / train.isnull().count() * 100, 1) pct_missing.sort_values(ascending=False).head() plt.figure(figsize=(16, 4)) sns.countplot(x='Embarked', data=train, hue='Survived') plt.legend(loc=1)
code
88086039/cell_2
[ "text_plain_output_1.png" ]
import os import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
88086039/cell_18
[ "text_html_output_1.png" ]
train = pd.read_csv(dirname + '/train.csv') test = pd.read_csv(dirname + '/test.csv') pid_test = test['PassengerId'] train.info()
code
88086039/cell_32
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import seaborn as sns train = pd.read_csv(dirname + '/train.csv') test = pd.read_csv(dirname + '/test.csv') pid_test = test['PassengerId'] pct_missing = round(train.isnull().sum() / train.isnull().count() * 100, 1) pct_missing.sort_values(ascending=False).head() plt.figure(figsize=(16, 4)) sns.countplot(x='Survived', data=train)
code
88086039/cell_38
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import seaborn as sns train = pd.read_csv(dirname + '/train.csv') test = pd.read_csv(dirname + '/test.csv') pid_test = test['PassengerId'] pct_missing = round(train.isnull().sum() / train.isnull().count() * 100, 1) pct_missing.sort_values(ascending=False).head() plt.figure(figsize=(16, 4)) sns.countplot(x='Survived', hue='Pclass', data=train) plt.legend(loc=1)
code
88086039/cell_43
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import seaborn as sns train = pd.read_csv(dirname + '/train.csv') test = pd.read_csv(dirname + '/test.csv') pid_test = test['PassengerId'] pct_missing = round(train.isnull().sum() / train.isnull().count() * 100, 1) pct_missing.sort_values(ascending=False).head() print('Survival rate with respect to the port of embarkation:') print('\tS: {} %'.format(train[(train.Embarked == 'S') & (train.Survived == 1)]['Survived'].count() / len(train[train.Embarked == 'S']) * 100)) print('\tC: {} %'.format(train[(train.Embarked == 'C') & (train.Survived == 1)]['Survived'].count() / len(train[train.Embarked == 'C']) * 100)) print('\tQ: {} %'.format(train[(train.Embarked == 'Q') & (train.Survived == 1)]['Survived'].count() / len(train[train.Embarked == 'Q']) * 100))
code