path (string, 13-17 chars) | screenshot_names (sequence, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 class: code) |
---|---|---|---|
129024934/cell_24 | [
"text_plain_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df['new'] = df['W'] + df['Y']  # assumed setup: recreate the 'new' column so the drop below succeeds
df.drop('new', axis=1, inplace=True)
df
df.drop('E', axis=0, inplace=True)
df | code |
129024934/cell_22 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df['new'] = df['W'] + df['Y']  # assumed setup: recreate the 'new' column so the drop below succeeds
df.drop('new', axis=1, inplace=True)
df | code |
129024934/cell_53 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
np.random.seed(101)
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
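# build a two-level MultiIndex from zipped (group, number) pairs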
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
dfnew = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
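# frame with missing values (NaNs) in columns A and B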
df3 = {'A': [1, 2, np.nan], 'B': [5, np.nan, np.nan], 'C': [1, 2, 3]}
df3 = pd.DataFrame(df3)
df3 | code |
129024934/cell_27 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df['new'] = df['W'] + df['Y']  # assumed setup: recreate the 'new' column so the drop below succeeds
df.drop('new', axis=1, inplace=True)
df
df.drop('E', axis=0, inplace=True)
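# loc indexes rows by label, iloc by integer position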
df.loc['A']
df.iloc[1]
df.loc['B', 'Y'] | code |
129024934/cell_37 | [
"text_html_output_1.png"
] | from numpy.random import randn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'Italy', 'Japan'])
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df['new'] = df['W'] + df['Y']  # assumed setup: recreate the 'new' column so the drop below succeeds
df.drop('new', axis=1, inplace=True)
df
df.drop('E', axis=0, inplace=True)
df.loc['A']
df.iloc[1]
df.loc['B', 'Y']
df.loc[['A', 'B'], ['W', 'Y']]
df | code |
129024934/cell_12 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
ser1['USA'] | code |
129024934/cell_5 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels) | code |
33095866/cell_13 | [
"text_html_output_2.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px # plotly express
lockdown_df = pd.read_csv(files['countryLockdowndates.csv'])
lockdown_df['LockDown Date'] = pd.to_datetime(lockdown_df['Date'], format='%d/%m/%Y')
lockdown_df.sort_values('LockDown Date', inplace=True)
df = pd.read_csv(files['time_series_covid_19_confirmed.csv'])
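# clip any negative values in the date ('/20') columns to zero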
df[df.columns[df.columns.str.contains('/20')]] = df[df.columns[df.columns.str.contains('/20')]].clip(lower=0)
country_col = 'Country/Region'
confirmed_col = 'Confirmed Cases'
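# sum provinces per country, then melt the per-date columns into long (country, iso code, date, cases) rows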
confirmed_df = pd.melt(df[df.columns.difference(['Province/State', 'Lat', 'Long'])].groupby([country_col, 'iso_codes']).sum().reset_index(), id_vars=[country_col, 'iso_codes'], var_name='Date', value_name=confirmed_col)
confirmed_df = pd.merge(confirmed_df, lockdown_df[[country_col, 'LockDown Date']].groupby(country_col).first(), left_on=country_col, right_on=country_col, how='left')
confirmed_df['Date'] = pd.to_datetime(confirmed_df['Date'])
confirmed_df.sort_values('Date', inplace=True)
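# animated choropleth: confirmed cases per country, one frame per date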
fig = px.choropleth(confirmed_df, locations='iso_codes', hover_name=country_col, animation_frame=confirmed_df['Date'].astype(str), color=confirmed_col, color_continuous_scale=px.colors.sequential.Rainbow, projection='natural earth', title='Confirmed Cases over the world')
top_affected_countries = df.sort_values(confirmed_df['Date'].max().strftime('%-m/%-d/%y'), ascending=False)[country_col].iloc[:10].values
confirmed_df = confirmed_df[confirmed_df[country_col].isin(top_affected_countries)].sort_values('Date')
fig = px.line(confirmed_df, color=country_col, x='Date', y=confirmed_col, title='Confirmed Cases vs Date for top 10 infected countries')
fig.update_xaxes(rangeslider_visible=True)
confirmed_pct_df = pd.concat([confirmed_df, confirmed_df.groupby([country_col])[confirmed_col].pct_change().rename('Percentage Change') * 100], axis=1)
fig = px.line(confirmed_pct_df, color=country_col, x='Date', y='Percentage Change', title='Percentage Change each day for top 10 infected countries')
fig.update_layout(yaxis={'ticksuffix': '%'})
fig.update_xaxes(rangeslider_visible=True)
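# pct_change yields inf when the previous day's count was zero; fall back to the raw count * 100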
confirmed_pct_df['Percentage Change'] = confirmed_pct_df[[confirmed_col, 'Percentage Change']].apply(lambda x: x['Percentage Change'] if x['Percentage Change'] != np.inf else x[confirmed_col] * 100, axis=1)
confirmed_pct_df['After LockDown'] = (confirmed_pct_df['Date'] > confirmed_pct_df['LockDown Date']).astype(str)
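# mean and standard deviation of the daily percentage change, split before vs after lockdown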
Mean_Median_Confirmed_df = confirmed_pct_df[[country_col, 'After LockDown', 'Percentage Change']].groupby([country_col, 'After LockDown']).agg(['mean', 'std'])
Mean_Median_Confirmed_df.columns = Mean_Median_Confirmed_df.columns.droplevel(0)
Mean_Median_Confirmed_df.rename({'mean': 'Mean', 'std': 'Standard Deviation'}, axis=1, inplace=True)
Mean_Median_Confirmed_df = Mean_Median_Confirmed_df.reset_index()
fig = px.bar(Mean_Median_Confirmed_df, x=country_col, y='Standard Deviation', color='After LockDown', barmode='group', title='Standard Deviation Comparison of Percentage Change Before & After Lockdown for top 10 infected countries')
fig.show()
fig = px.bar(Mean_Median_Confirmed_df, x=country_col, y='Mean', color='After LockDown', barmode='group', title='Mean Comparison of Percentage Change Before & After Lockdown for top 10 infected countries')
fig.show() | code |
33095866/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px # plotly express
lockdown_df = pd.read_csv(files['countryLockdowndates.csv'])
lockdown_df['LockDown Date'] = pd.to_datetime(lockdown_df['Date'], format='%d/%m/%Y')
lockdown_df.sort_values('LockDown Date', inplace=True)
df = pd.read_csv(files['time_series_covid_19_confirmed.csv'])
df[df.columns[df.columns.str.contains('/20')]] = df[df.columns[df.columns.str.contains('/20')]].clip(lower=0)
country_col = 'Country/Region'
confirmed_col = 'Confirmed Cases'
confirmed_df = pd.melt(df[df.columns.difference(['Province/State', 'Lat', 'Long'])].groupby([country_col, 'iso_codes']).sum().reset_index(), id_vars=[country_col, 'iso_codes'], var_name='Date', value_name=confirmed_col)
confirmed_df = pd.merge(confirmed_df, lockdown_df[[country_col, 'LockDown Date']].groupby(country_col).first(), left_on=country_col, right_on=country_col, how='left')
confirmed_df['Date'] = pd.to_datetime(confirmed_df['Date'])
confirmed_df.sort_values('Date', inplace=True)
fig = px.choropleth(confirmed_df, locations='iso_codes', hover_name=country_col, animation_frame=confirmed_df['Date'].astype(str), color=confirmed_col, color_continuous_scale=px.colors.sequential.Rainbow, projection='natural earth', title='Confirmed Cases over the world')
fig.show() | code |
33095866/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
lockdown_df = pd.read_csv(files['countryLockdowndates.csv'])
lockdown_df['LockDown Date'] = pd.to_datetime(lockdown_df['Date'], format='%d/%m/%Y')
lockdown_df.sort_values('LockDown Date', inplace=True)
df = pd.read_csv(files['time_series_covid_19_confirmed.csv'])
df[df.columns[df.columns.str.contains('/20')]] = df[df.columns[df.columns.str.contains('/20')]].clip(lower=0)
country_col = 'Country/Region'
confirmed_col = 'Confirmed Cases'
df.head() | code |
33095866/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import plotly.express as px
import pycountry
from geopy.geocoders import Nominatim
import os
file_input = ['/kaggle/input', '../../../datasets/extracts/']
files = {}
for dirname, _, filenames in os.walk(file_input[0]):
for filename in filenames:
files[filename] = os.path.join(dirname, filename)
print(filename) | code |
33095866/cell_8 | [
"text_html_output_2.png",
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px # plotly express
lockdown_df = pd.read_csv(files['countryLockdowndates.csv'])
lockdown_df['LockDown Date'] = pd.to_datetime(lockdown_df['Date'], format='%d/%m/%Y')
lockdown_df.sort_values('LockDown Date', inplace=True)
df = pd.read_csv(files['time_series_covid_19_confirmed.csv'])
df[df.columns[df.columns.str.contains('/20')]] = df[df.columns[df.columns.str.contains('/20')]].clip(lower=0)
country_col = 'Country/Region'
confirmed_col = 'Confirmed Cases'
confirmed_df = pd.melt(df[df.columns.difference(['Province/State', 'Lat', 'Long'])].groupby([country_col, 'iso_codes']).sum().reset_index(), id_vars=[country_col, 'iso_codes'], var_name='Date', value_name=confirmed_col)
confirmed_df = pd.merge(confirmed_df, lockdown_df[[country_col, 'LockDown Date']].groupby(country_col).first(), left_on=country_col, right_on=country_col, how='left')
confirmed_df['Date'] = pd.to_datetime(confirmed_df['Date'])
confirmed_df.sort_values('Date', inplace=True)
fig = px.choropleth(confirmed_df, locations='iso_codes', hover_name=country_col, animation_frame=confirmed_df['Date'].astype(str), color=confirmed_col, color_continuous_scale=px.colors.sequential.Rainbow, projection='natural earth', title='Confirmed Cases over the world')
top_affected_countries = df.sort_values(confirmed_df['Date'].max().strftime('%-m/%-d/%y'), ascending=False)[country_col].iloc[:10].values
confirmed_df = confirmed_df[confirmed_df[country_col].isin(top_affected_countries)].sort_values('Date')
fig = px.line(confirmed_df, color=country_col, x='Date', y=confirmed_col, title='Confirmed Cases vs Date for top 10 infected countries')
fig.update_xaxes(rangeslider_visible=True)
fig.show() | code |
33095866/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px # plotly express
lockdown_df = pd.read_csv(files['countryLockdowndates.csv'])
lockdown_df['LockDown Date'] = pd.to_datetime(lockdown_df['Date'], format='%d/%m/%Y')
lockdown_df.sort_values('LockDown Date', inplace=True)
df = pd.read_csv(files['time_series_covid_19_confirmed.csv'])
df[df.columns[df.columns.str.contains('/20')]] = df[df.columns[df.columns.str.contains('/20')]].clip(lower=0)
country_col = 'Country/Region'
confirmed_col = 'Confirmed Cases'
confirmed_df = pd.melt(df[df.columns.difference(['Province/State', 'Lat', 'Long'])].groupby([country_col, 'iso_codes']).sum().reset_index(), id_vars=[country_col, 'iso_codes'], var_name='Date', value_name=confirmed_col)
confirmed_df = pd.merge(confirmed_df, lockdown_df[[country_col, 'LockDown Date']].groupby(country_col).first(), left_on=country_col, right_on=country_col, how='left')
confirmed_df['Date'] = pd.to_datetime(confirmed_df['Date'])
confirmed_df.sort_values('Date', inplace=True)
fig = px.choropleth(confirmed_df, locations='iso_codes', hover_name=country_col, animation_frame=confirmed_df['Date'].astype(str), color=confirmed_col, color_continuous_scale=px.colors.sequential.Rainbow, projection='natural earth', title='Confirmed Cases over the world')
top_affected_countries = df.sort_values(confirmed_df['Date'].max().strftime('%-m/%-d/%y'), ascending=False)[country_col].iloc[:10].values
confirmed_df = confirmed_df[confirmed_df[country_col].isin(top_affected_countries)].sort_values('Date')
fig = px.line(confirmed_df, color=country_col, x='Date', y=confirmed_col, title='Confirmed Cases vs Date for top 10 infected countries')
fig.update_xaxes(rangeslider_visible=True)
confirmed_pct_df = pd.concat([confirmed_df, confirmed_df.groupby([country_col])[confirmed_col].pct_change().rename('Percentage Change') * 100], axis=1)
fig = px.line(confirmed_pct_df, color=country_col, x='Date', y='Percentage Change', title='Percentage Change each day for top 10 infected countries')
fig.update_layout(yaxis={'ticksuffix': '%'})
fig.update_xaxes(rangeslider_visible=True)
fig.show() | code |
50227879/cell_4 | [
"text_plain_output_1.png"
] | from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data() | code |
50227879/cell_6 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
x = x_train[1]
plt.imshow(x, cmap='gray') | code |
50227879/cell_11 | [
"text_plain_output_1.png"
] | import keras
img_cols, img_rows = (28, 28)
input_shape = (img_cols, img_rows, 1)
batch_size = 128
num_classes = 10
epochs = 12
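# add a trailing channel axis: (samples, 28, 28) -> (samples, 28, 28, 1), as Conv2D expects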
x_train = x_train.reshape(x_train.shape[0], img_cols, img_rows, 1)
x_test = x_test.reshape(x_test.shape[0], img_cols, img_rows, 1)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print(x_train.shape)
print(x_test.shape)
print(y_train.shape) | code |
50227879/cell_19 | [
"text_plain_output_1.png"
] | from keras.layers import Conv2D, MaxPool2D
from keras.layers import Dense, Dropout, Flatten
from keras.models import Sequential
import keras
img_cols, img_rows = (28, 28)
input_shape = (img_cols, img_rows, 1)
batch_size = 128
num_classes = 10
epochs = 12
x_train = x_train.reshape(x_train.shape[0], img_cols, img_rows, 1)
x_test = x_test.reshape(x_test.shape[0], img_cols, img_rows, 1)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
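# scale pixel intensities from [0, 255] to [0, 1]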
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
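# small CNN: two convolutional layers, one max-pooling layer, then a dense classifier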
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
model.evaluate(x_test, y_test) | code |
50227879/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
50227879/cell_7 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
x = x_train[1]
x.shape | code |
50227879/cell_18 | [
"text_plain_output_1.png"
] | from keras.layers import Conv2D, MaxPool2D
from keras.layers import Dense, Dropout, Flatten
from keras.models import Sequential
import keras
img_cols, img_rows = (28, 28)
input_shape = (img_cols, img_rows, 1)
batch_size = 128
num_classes = 10
epochs = 12
x_train = x_train.reshape(x_train.shape[0], img_cols, img_rows, 1)
x_test = x_test.reshape(x_test.shape[0], img_cols, img_rows, 1)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) | code |
50227879/cell_8 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | print('Train size : \n')
print(x_train.shape)
print(y_train.shape)
print('\n Test size : \n')
print(x_test.shape)
print(y_test.shape) | code |
50227879/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from keras.layers import Conv2D, MaxPool2D
from keras.layers import Dense, Dropout, Flatten
from keras.models import Sequential
import keras
img_cols, img_rows = (28, 28)
input_shape = (img_cols, img_rows, 1)
batch_size = 128
num_classes = 10
epochs = 12
x_train = x_train.reshape(x_train.shape[0], img_cols, img_rows, 1)
x_test = x_test.reshape(x_test.shape[0], img_cols, img_rows, 1)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.summary() | code |
33095778/cell_4 | [
"image_output_1.png"
] | import yfinance
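# daily prices for the S&P 500, FTSE 100, Nikkei 225 and DAX indices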
raw_data = yfinance.download(tickers='^GSPC ^FTSE ^N225 ^GDAXI', start='1994-01-07', end='2019-09-01', interval='1d', group_by='ticker', auto_adjust=True, threads=True) | code |
33095778/cell_34 | [
"image_output_1.png"
] | from statsmodels.tsa.arima_model import ARIMA
import matplotlib.pyplot as plt
model_ar = ARIMA(df.ftse, order=(1, 0, 0))
results_ar = model_ar.fit()
start_date = '2014-07-16'
end_date = '2015-01-01'
model_ret_ar = ARIMA(df.ret_ftse[1:], order=(5, 0, 0))
results_ret_ar = model_ret_ar.fit()
df_pred_ret_ar = results_ret_ar.predict(start=start_date, end=end_date)
model_ret_ma = ARIMA(df.ret_ftse[1:], order=(0, 0, 5))
results_ret_ma = model_ret_ma.fit()
df_pred_ret_ma = results_ret_ma.predict(start=start_date, end=end_date)
df_pred_ret_ma[start_date:end_date].plot(figsize=(20, 5), color='red')
df_test.ret_ftse[start_date:end_date].plot(color='blue')
plt.title('Predictions vs Actuals (Returns) | MA', size=24)
plt.show() | code |
33095778/cell_20 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
start_date = '2014-07-16'
end_date = '2015-01-01'
df_pred.predictions[start_date:end_date].plot(figsize=(20, 5), color='red')
df_test.ftse[start_date:end_date].plot(color='blue')
plt.title('Predictions vs Actuals', size=24)
plt.legend()
plt.show() | code |
33095778/cell_41 | [
"image_output_1.png"
] | from statsmodels.tsa.arima_model import ARIMA
import matplotlib.pyplot as plt
model_ar = ARIMA(df.ftse, order=(1, 0, 0))
results_ar = model_ar.fit()
start_date = '2014-07-16'
end_date = '2015-01-01'
model_ret_ar = ARIMA(df.ret_ftse[1:], order=(5, 0, 0))
results_ret_ar = model_ret_ar.fit()
df_pred_ret_ar = results_ret_ar.predict(start=start_date, end=end_date)
model_ret_ma = ARIMA(df.ret_ftse[1:], order=(0, 0, 5))
results_ret_ma = model_ret_ma.fit()
df_pred_ret_ma = results_ret_ma.predict(start=start_date, end=end_date)
model_ret_arma = ARIMA(df.ret_ftse[1:], order=(4, 0, 5))
results_ret_arma = model_ret_arma.fit()
df_pred_ret_arma = results_ret_arma.predict(start=start_date, end=end_date)
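# ARMAX: returns of the other three indices enter as exogenous regressors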
model_ret_armax = ARIMA(df.ret_ftse[1:], exog=df[['ret_spx', 'ret_dax', 'ret_nikkei']][1:], order=(1, 0, 1))
results_ret_armax = model_ret_armax.fit()
df_pred_ret_armax = results_ret_armax.predict(start=start_date, end=end_date, exog=df_test[['ret_spx', 'ret_dax', 'ret_nikkei']][start_date:end_date])
df_test['int_ftse_ret'] = df_test.ftse.diff(1)
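# ARIMA(1, 1, 1): d=1, so the model differences the return series once internally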
model_arima = ARIMA(df.ret_ftse[1:], order=(1, 1, 1))
results_arima = model_arima.fit()
df_pred_arima = results_arima.predict(start=start_date, end=end_date)
df_pred_arima[start_date:end_date].plot(figsize=(20, 5), color='red')
df_test.ret_ftse[start_date:end_date].plot(color='blue')
plt.title('Predictions vs Actuals (Returns) | ARIMA', size=24)
plt.show() | code |
33095778/cell_2 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import scipy
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
import statsmodels.graphics.tsaplots as sgt
import statsmodels.tsa.stattools as sts
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX
!pip install pmdarima -U
from pmdarima.arima import auto_arima
!pip install arch -U
from arch import arch_model
!pip install yfinance -U
import yfinance
import warnings
warnings.filterwarnings("ignore")
sns.set() | code |
33095778/cell_28 | [
"image_output_1.png"
] | from statsmodels.tsa.arima_model import ARIMA
import matplotlib.pyplot as plt
model_ar = ARIMA(df.ftse, order=(1, 0, 0))
results_ar = model_ar.fit()
start_date = '2014-07-16'
end_date = '2015-01-01'
model_ret_ar = ARIMA(df.ret_ftse[1:], order=(5, 0, 0))
results_ret_ar = model_ret_ar.fit()
df_pred_ret_ar = results_ret_ar.predict(start=start_date, end=end_date)
df_pred_ret_ar[start_date:end_date].plot(figsize=(20, 5), color='red')
df_test.ret_ftse[start_date:end_date].plot(color='blue')
plt.title('Predictions vs Actuals (Returns) | AR', size=24)
plt.show() | code |
33095778/cell_16 | [
"text_plain_output_1.png"
] | from statsmodels.tsa.arima_model import ARIMA
model_ar = ARIMA(df.ftse, order=(1, 0, 0))
results_ar = model_ar.fit()
df.tail() | code |
33095778/cell_38 | [
"image_output_1.png"
] | from statsmodels.tsa.arima_model import ARIMA
import matplotlib.pyplot as plt
model_ar = ARIMA(df.ftse, order=(1, 0, 0))
results_ar = model_ar.fit()
start_date = '2014-07-16'
end_date = '2015-01-01'
model_ret_ar = ARIMA(df.ret_ftse[1:], order=(5, 0, 0))
results_ret_ar = model_ret_ar.fit()
df_pred_ret_ar = results_ret_ar.predict(start=start_date, end=end_date)
model_ret_ma = ARIMA(df.ret_ftse[1:], order=(0, 0, 5))
results_ret_ma = model_ret_ma.fit()
df_pred_ret_ma = results_ret_ma.predict(start=start_date, end=end_date)
model_ret_arma = ARIMA(df.ret_ftse[1:], order=(4, 0, 5))
results_ret_arma = model_ret_arma.fit()
df_pred_ret_arma = results_ret_arma.predict(start=start_date, end=end_date)
model_ret_armax = ARIMA(df.ret_ftse[1:], exog=df[['ret_spx', 'ret_dax', 'ret_nikkei']][1:], order=(1, 0, 1))
results_ret_armax = model_ret_armax.fit()
df_pred_ret_armax = results_ret_armax.predict(start=start_date, end=end_date, exog=df_test[['ret_spx', 'ret_dax', 'ret_nikkei']][start_date:end_date])
df_pred_ret_armax[start_date:end_date].plot(figsize=(20, 5), color='red')
df_test.ret_ftse[start_date:end_date].plot(color='blue')
plt.title('Predictions vs Actuals (Returns) | ARMAX', size=24)
plt.show() | code |
33095778/cell_43 | [
"image_output_1.png"
] | from statsmodels.tsa.arima_model import ARIMA
import matplotlib.pyplot as plt
model_ar = ARIMA(df.ftse, order=(1, 0, 0))
results_ar = model_ar.fit()
start_date = '2014-07-16'
end_date = '2015-01-01'
model_ret_ar = ARIMA(df.ret_ftse[1:], order=(5, 0, 0))
results_ret_ar = model_ret_ar.fit()
df_pred_ret_ar = results_ret_ar.predict(start=start_date, end=end_date)
model_ret_ma = ARIMA(df.ret_ftse[1:], order=(0, 0, 5))
results_ret_ma = model_ret_ma.fit()
df_pred_ret_ma = results_ret_ma.predict(start=start_date, end=end_date)
model_ret_arma = ARIMA(df.ret_ftse[1:], order=(4, 0, 5))
results_ret_arma = model_ret_arma.fit()
df_pred_ret_arma = results_ret_arma.predict(start=start_date, end=end_date)
model_ret_armax = ARIMA(df.ret_ftse[1:], exog=df[['ret_spx', 'ret_dax', 'ret_nikkei']][1:], order=(1, 0, 1))
results_ret_armax = model_ret_armax.fit()
df_pred_ret_armax = results_ret_armax.predict(start=start_date, end=end_date, exog=df_test[['ret_spx', 'ret_dax', 'ret_nikkei']][start_date:end_date])
df_test['int_ftse_ret'] = df_test.ftse.diff(1)
model_arima = ARIMA(df.ret_ftse[1:], order=(1, 1, 1))
results_arima = model_arima.fit()
df_pred_arima = results_arima.predict(start=start_date, end=end_date)
model_arimax = ARIMA(df.ret_ftse[1:], exog=df[['ret_spx', 'ret_dax', 'ret_nikkei']][1:], order=(1, 1, 1))
results_arimax = model_arimax.fit()
df_pred_arimax = results_arimax.predict(start=start_date, end=end_date, exog=df_test[['ret_spx', 'ret_dax', 'ret_nikkei']][start_date:end_date])
df_pred_arimax[start_date:end_date].plot(figsize=(20, 5), color='red')
df_test.ret_ftse[start_date:end_date].plot(color='blue')
plt.title('Predictions vs Actuals (Returns) | ARIMAX', size=24)
plt.show() | code |
33095778/cell_36 | [
"image_output_1.png"
] | from statsmodels.tsa.arima_model import ARIMA
import matplotlib.pyplot as plt
model_ar = ARIMA(df.ftse, order=(1, 0, 0))
results_ar = model_ar.fit()
start_date = '2014-07-16'
end_date = '2015-01-01'
model_ret_ar = ARIMA(df.ret_ftse[1:], order=(5, 0, 0))
results_ret_ar = model_ret_ar.fit()
df_pred_ret_ar = results_ret_ar.predict(start=start_date, end=end_date)
model_ret_ma = ARIMA(df.ret_ftse[1:], order=(0, 0, 5))
results_ret_ma = model_ret_ma.fit()
df_pred_ret_ma = results_ret_ma.predict(start=start_date, end=end_date)
model_ret_arma = ARIMA(df.ret_ftse[1:], order=(4, 0, 5))
results_ret_arma = model_ret_arma.fit()
df_pred_ret_arma = results_ret_arma.predict(start=start_date, end=end_date)
df_pred_ret_arma[start_date:end_date].plot(figsize=(20, 5), color='red')
df_test.ret_ftse[start_date:end_date].plot(color='blue')
plt.title('Predictions vs Actuals (Returns) | ARMA', size=24)
plt.show() | code |
128029153/cell_13 | [
"text_plain_output_1.png"
] | from PIL import Image
from PIL import Image, ImageDraw
from pycocotools.coco import COCO
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
import os
import os
import torch
import torch
import torchvision
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import os
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
path2data = '/kaggle/input/levi9-hack9-2023/train'
path2json = '/kaggle/input/levi9-hack9-2023/train.json'
coco_train = dset.CocoDetection(root=path2data, annFile=path2json, transform=transforms.ToTensor())
img, target = coco_train[0]
import os
import torch
import torch.utils.data
import torchvision
from PIL import Image
from pycocotools.coco import COCO
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
class myOwnDataset(torch.utils.data.Dataset):
def __init__(self, root, annotation, transforms=None):
self.root = root
self.transforms = transforms
self.coco = COCO(annotation)
self.ids = list(sorted(self.coco.imgs.keys()))
def __getitem__(self, index):
coco = self.coco
img_id = self.ids[index]
ann_ids = coco.getAnnIds(imgIds=img_id)
coco_annotation = coco.loadAnns(ann_ids)
path = coco.loadImgs(img_id)[0]['file_name']
img = Image.open(os.path.join(self.root, path))
num_objs = len(coco_annotation)
boxes = []
area = 0
for i in range(num_objs):
xmin = coco_annotation[i]['bbox'][0]
ymin = coco_annotation[i]['bbox'][1]
xmax = xmin + coco_annotation[i]['bbox'][2]
ymax = ymin + coco_annotation[i]['bbox'][3]
area += (xmax - xmin) * (ymax - ymin)
boxes.append([xmin, ymin, xmax, ymax])
if num_objs == 0:
boxes = torch.zeros((0, 4), dtype=torch.float32)
else:
boxes = torch.as_tensor(boxes, dtype=torch.float32)
labels = torch.ones((num_objs,), dtype=torch.int64)
img_id = torch.tensor([img_id])
areas = []
for i in range(num_objs):
areas.append(coco_annotation[i]['area'])
area = torch.as_tensor(area, dtype=torch.float32)
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
my_annotation = {}
my_annotation['boxes'] = boxes
my_annotation['labels'] = labels
my_annotation['image_id'] = img_id
my_annotation['area'] = area
my_annotation['iscrowd'] = iscrowd
if self.transforms is not None:
img = self.transforms(img)
return (img, my_annotation)
def __len__(self):
return len(self.ids)
def get_transform():
custom_transforms = []
custom_transforms.append(torchvision.transforms.ToTensor())
return torchvision.transforms.Compose(custom_transforms)
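# detection targets vary in size per image, so transpose the batch into (images, annotations) tuples instead of stacking tensors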
def collate_fn(batch):
return tuple(zip(*batch))
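# load a COCO-pretrained Faster R-CNN and replace its box-predictor head with one sized for num_classes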
def get_model_instance_segmentation(num_classes):
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True, progress=True, pretrained_backbone=True)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
return model
train_batch_size = 1
train_shuffle_dl = True
num_workers_dl = 4
num_classes = 2
num_epochs = 2
lr = 0.005
momentum = 0.9
weight_decay = 0.005
import torch
my_dataset = myOwnDataset(root=path2data, annotation=path2json, transforms=get_transform())
data_loader = torch.utils.data.DataLoader(my_dataset, batch_size=train_batch_size, shuffle=train_shuffle_dl, num_workers=num_workers_dl, collate_fn=collate_fn)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
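# preliminary pass over the loader: move each batch to the device (serves as a data-loading sanity check)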
for imgs, annotations in data_loader:
imgs = list((img.to(device) for img in imgs))
annotations = [{k: v.to(device) for k, v in t.items()} for t in annotations]
model = get_model_instance_segmentation(num_classes)
model.to(device)
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=lr, momentum=momentum, weight_decay=weight_decay)
len_dataloader = len(data_loader)
for epoch in range(num_epochs):
model.train()
i = 0
for imgs, annotations in data_loader:
i += 1
imgs = list((img.to(device) for img in imgs))
annotations = [{k: v.to(device) for k, v in t.items()} for t in annotations]
loss_dict = model(imgs, annotations)
losses = sum((loss for loss in loss_dict.values()))
optimizer.zero_grad()
losses.backward()
optimizer.step()
model.eval()
from PIL import Image, ImageDraw
sample_image_path = '/kaggle/input/levi9-hack9-2023/test/005.jpg'
sample_image = Image.open(sample_image_path)
sample_image
transformed_img = torchvision.transforms.transforms.ToTensor()(sample_image)
result = model([transformed_img.to(device)])
result
boat_id = 1
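# keep only the predicted boxes whose label equals the boat class id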
boat_boxes = [x.cpu().detach().numpy().tolist() for i, x in enumerate(result[0]['boxes']) if result[0]['labels'][i] == boat_id]
boat_boxes | code |
128029153/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from PIL import Image
from pycocotools.coco import COCO
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
import os
import os
import torch
import torch
import torchvision
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import os
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
path2data = '/kaggle/input/levi9-hack9-2023/train'
path2json = '/kaggle/input/levi9-hack9-2023/train.json'
coco_train = dset.CocoDetection(root=path2data, annFile=path2json, transform=transforms.ToTensor())
img, target = coco_train[0]
import os
import torch
import torch.utils.data
import torchvision
from PIL import Image
from pycocotools.coco import COCO
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
class myOwnDataset(torch.utils.data.Dataset):
def __init__(self, root, annotation, transforms=None):
self.root = root
self.transforms = transforms
self.coco = COCO(annotation)
self.ids = list(sorted(self.coco.imgs.keys()))
def __getitem__(self, index):
coco = self.coco
img_id = self.ids[index]
ann_ids = coco.getAnnIds(imgIds=img_id)
coco_annotation = coco.loadAnns(ann_ids)
path = coco.loadImgs(img_id)[0]['file_name']
img = Image.open(os.path.join(self.root, path))
num_objs = len(coco_annotation)
boxes = []
area = 0
for i in range(num_objs):
xmin = coco_annotation[i]['bbox'][0]
ymin = coco_annotation[i]['bbox'][1]
xmax = xmin + coco_annotation[i]['bbox'][2]
ymax = ymin + coco_annotation[i]['bbox'][3]
area += (xmax - xmin) * (ymax - ymin)
boxes.append([xmin, ymin, xmax, ymax])
if num_objs == 0:
boxes = torch.zeros((0, 4), dtype=torch.float32)
else:
boxes = torch.as_tensor(boxes, dtype=torch.float32)
labels = torch.ones((num_objs,), dtype=torch.int64)
img_id = torch.tensor([img_id])
areas = []
for i in range(num_objs):
areas.append(coco_annotation[i]['area'])
area = torch.as_tensor(area, dtype=torch.float32)
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
my_annotation = {}
my_annotation['boxes'] = boxes
my_annotation['labels'] = labels
my_annotation['image_id'] = img_id
my_annotation['area'] = area
my_annotation['iscrowd'] = iscrowd
if self.transforms is not None:
img = self.transforms(img)
return (img, my_annotation)
def __len__(self):
return len(self.ids)
def get_transform():
custom_transforms = []
custom_transforms.append(torchvision.transforms.ToTensor())
return torchvision.transforms.Compose(custom_transforms)
def collate_fn(batch):
return tuple(zip(*batch))
def get_model_instance_segmentation(num_classes):
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True, progress=True, pretrained_backbone=True)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
return model
train_batch_size = 1
train_shuffle_dl = True
num_workers_dl = 4
num_classes = 2
num_epochs = 2
lr = 0.005
momentum = 0.9
weight_decay = 0.005
import torch
print('Torch version:', torch.__version__)
my_dataset = myOwnDataset(root=path2data, annotation=path2json, transforms=get_transform())
data_loader = torch.utils.data.DataLoader(my_dataset, batch_size=train_batch_size, shuffle=train_shuffle_dl, num_workers=num_workers_dl, collate_fn=collate_fn)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
for imgs, annotations in data_loader:
imgs = list((img.to(device) for img in imgs))
annotations = [{k: v.to(device) for k, v in t.items()} for t in annotations]
model = get_model_instance_segmentation(num_classes)
model.to(device)
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=lr, momentum=momentum, weight_decay=weight_decay)
len_dataloader = len(data_loader)
for epoch in range(num_epochs):
print(f'Epoch: {epoch}/{num_epochs}')
model.train()
i = 0
for imgs, annotations in data_loader:
i += 1
imgs = list((img.to(device) for img in imgs))
annotations = [{k: v.to(device) for k, v in t.items()} for t in annotations]
loss_dict = model(imgs, annotations)
losses = sum((loss for loss in loss_dict.values()))
optimizer.zero_grad()
losses.backward()
optimizer.step()
print(f'Iteration: {i}/{len_dataloader}, Loss: {losses}') | code |
128029153/cell_4 | [
"image_output_1.png"
] | import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
path2data = '/kaggle/input/levi9-hack9-2023/train'
path2json = '/kaggle/input/levi9-hack9-2023/train.json'
coco_train = dset.CocoDetection(root=path2data, annFile=path2json, transform=transforms.ToTensor()) | code |
128029153/cell_6 | [
"text_plain_output_1.png"
] | import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
path2data = '/kaggle/input/levi9-hack9-2023/train'
path2json = '/kaggle/input/levi9-hack9-2023/train.json'
coco_train = dset.CocoDetection(root=path2data, annFile=path2json, transform=transforms.ToTensor())
img, target = coco_train[0]
print(img.size)
print(target) | code |
128029153/cell_2 | [
"text_plain_output_1.png"
] | !pip install pycocotools | code |
128029153/cell_11 | [
"text_plain_output_1.png"
] | from PIL import Image
from PIL import Image, ImageDraw
from pycocotools.coco import COCO
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
import os
import os
import torch
import torch
import torchvision
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import os
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
path2data = '/kaggle/input/levi9-hack9-2023/train'
path2json = '/kaggle/input/levi9-hack9-2023/train.json'
coco_train = dset.CocoDetection(root=path2data, annFile=path2json, transform=transforms.ToTensor())
img, target = coco_train[0]
import os
import torch
import torch.utils.data
import torchvision
from PIL import Image
from pycocotools.coco import COCO
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
class myOwnDataset(torch.utils.data.Dataset):
def __init__(self, root, annotation, transforms=None):
self.root = root
self.transforms = transforms
self.coco = COCO(annotation)
self.ids = list(sorted(self.coco.imgs.keys()))
def __getitem__(self, index):
coco = self.coco
img_id = self.ids[index]
ann_ids = coco.getAnnIds(imgIds=img_id)
coco_annotation = coco.loadAnns(ann_ids)
path = coco.loadImgs(img_id)[0]['file_name']
img = Image.open(os.path.join(self.root, path))
num_objs = len(coco_annotation)
boxes = []
area = 0
for i in range(num_objs):
xmin = coco_annotation[i]['bbox'][0]
ymin = coco_annotation[i]['bbox'][1]
xmax = xmin + coco_annotation[i]['bbox'][2]
ymax = ymin + coco_annotation[i]['bbox'][3]
area += (xmax - xmin) * (ymax - ymin)
boxes.append([xmin, ymin, xmax, ymax])
if num_objs == 0:
boxes = torch.zeros((0, 4), dtype=torch.float32)
else:
boxes = torch.as_tensor(boxes, dtype=torch.float32)
labels = torch.ones((num_objs,), dtype=torch.int64)
img_id = torch.tensor([img_id])
areas = []
for i in range(num_objs):
areas.append(coco_annotation[i]['area'])
area = torch.as_tensor(area, dtype=torch.float32)
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
my_annotation = {}
my_annotation['boxes'] = boxes
my_annotation['labels'] = labels
my_annotation['image_id'] = img_id
my_annotation['area'] = area
my_annotation['iscrowd'] = iscrowd
if self.transforms is not None:
img = self.transforms(img)
return (img, my_annotation)
def __len__(self):
return len(self.ids)
def get_transform():
custom_transforms = []
custom_transforms.append(torchvision.transforms.ToTensor())
return torchvision.transforms.Compose(custom_transforms)
def collate_fn(batch):
return tuple(zip(*batch))
def get_model_instance_segmentation(num_classes):
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True, progress=True, pretrained_backbone=True)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
return model
from PIL import Image, ImageDraw
sample_image_path = '/kaggle/input/levi9-hack9-2023/test/005.jpg'
sample_image = Image.open(sample_image_path)
sample_image | code |
128029153/cell_14 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | from PIL import Image
from PIL import Image, ImageDraw
from pycocotools.coco import COCO
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
import os
import os
import torch
import torch
import torchvision
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import os
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
path2data = '/kaggle/input/levi9-hack9-2023/train'
path2json = '/kaggle/input/levi9-hack9-2023/train.json'
coco_train = dset.CocoDetection(root=path2data, annFile=path2json, transform=transforms.ToTensor())
img, target = coco_train[0]
import os
import torch
import torch.utils.data
import torchvision
from PIL import Image
from pycocotools.coco import COCO
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
class myOwnDataset(torch.utils.data.Dataset):
def __init__(self, root, annotation, transforms=None):
self.root = root
self.transforms = transforms
self.coco = COCO(annotation)
self.ids = list(sorted(self.coco.imgs.keys()))
def __getitem__(self, index):
coco = self.coco
img_id = self.ids[index]
ann_ids = coco.getAnnIds(imgIds=img_id)
coco_annotation = coco.loadAnns(ann_ids)
path = coco.loadImgs(img_id)[0]['file_name']
img = Image.open(os.path.join(self.root, path))
num_objs = len(coco_annotation)
boxes = []
area = 0
for i in range(num_objs):
xmin = coco_annotation[i]['bbox'][0]
ymin = coco_annotation[i]['bbox'][1]
xmax = xmin + coco_annotation[i]['bbox'][2]
ymax = ymin + coco_annotation[i]['bbox'][3]
area += (xmax - xmin) * (ymax - ymin)
boxes.append([xmin, ymin, xmax, ymax])
if num_objs == 0:
boxes = torch.zeros((0, 4), dtype=torch.float32)
else:
boxes = torch.as_tensor(boxes, dtype=torch.float32)
labels = torch.ones((num_objs,), dtype=torch.int64)
img_id = torch.tensor([img_id])
areas = []
for i in range(num_objs):
areas.append(coco_annotation[i]['area'])
area = torch.as_tensor(area, dtype=torch.float32)
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
my_annotation = {}
my_annotation['boxes'] = boxes
my_annotation['labels'] = labels
my_annotation['image_id'] = img_id
my_annotation['area'] = area
my_annotation['iscrowd'] = iscrowd
if self.transforms is not None:
img = self.transforms(img)
return (img, my_annotation)
def __len__(self):
return len(self.ids)
def get_transform():
custom_transforms = []
custom_transforms.append(torchvision.transforms.ToTensor())
return torchvision.transforms.Compose(custom_transforms)
def collate_fn(batch):
return tuple(zip(*batch))
def get_model_instance_segmentation(num_classes):
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True, progress=True, pretrained_backbone=True)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
return model
train_batch_size = 1
train_shuffle_dl = True
num_workers_dl = 4
num_classes = 2
num_epochs = 2
lr = 0.005
momentum = 0.9
weight_decay = 0.005
import torch
my_dataset = myOwnDataset(root=path2data, annotation=path2json, transforms=get_transform())
data_loader = torch.utils.data.DataLoader(my_dataset, batch_size=train_batch_size, shuffle=train_shuffle_dl, num_workers=num_workers_dl, collate_fn=collate_fn)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
for imgs, annotations in data_loader:
imgs = list((img.to(device) for img in imgs))
annotations = [{k: v.to(device) for k, v in t.items()} for t in annotations]
model = get_model_instance_segmentation(num_classes)
model.to(device)
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=lr, momentum=momentum, weight_decay=weight_decay)
len_dataloader = len(data_loader)
for epoch in range(num_epochs):
model.train()
i = 0
for imgs, annotations in data_loader:
i += 1
imgs = list((img.to(device) for img in imgs))
annotations = [{k: v.to(device) for k, v in t.items()} for t in annotations]
loss_dict = model(imgs, annotations)
losses = sum((loss for loss in loss_dict.values()))
optimizer.zero_grad()
losses.backward()
optimizer.step()
model.eval()
from PIL import Image, ImageDraw
sample_image_path = '/kaggle/input/levi9-hack9-2023/test/005.jpg'
sample_image = Image.open(sample_image_path)
sample_image
transformed_img = torchvision.transforms.transforms.ToTensor()(sample_image)
result = model([transformed_img.to(device)])
result
boat_id = 1
boat_boxes = [x.cpu().detach().numpy().tolist() for i, x in enumerate(result[0]['boxes']) if result[0]['labels'][i] == boat_id]
boat_boxes
sample_image_annotated = sample_image.copy()
img_bbox = ImageDraw.Draw(sample_image_annotated)
for bbox in boat_boxes:
img_bbox.rectangle(bbox, outline='white')
for bbox in boat_boxes:
    x1, y1, x2, y2 = map(int, bbox)
    print(x1, y1, x2, y2)
    img_bbox.rectangle([x1, y1, x2, y2], outline='red')
sample_image_annotated | code |
128029153/cell_10 | [
"text_plain_output_1.png"
] | from PIL import Image
from pycocotools.coco import COCO
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
import os
import os
import torch
import torch
import torchvision
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import os
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
path2data = '/kaggle/input/levi9-hack9-2023/train'
path2json = '/kaggle/input/levi9-hack9-2023/train.json'
coco_train = dset.CocoDetection(root=path2data, annFile=path2json, transform=transforms.ToTensor())
img, target = coco_train[0]
import os
import torch
import torch.utils.data
import torchvision
from PIL import Image
from pycocotools.coco import COCO
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
class myOwnDataset(torch.utils.data.Dataset):
def __init__(self, root, annotation, transforms=None):
self.root = root
self.transforms = transforms
self.coco = COCO(annotation)
self.ids = list(sorted(self.coco.imgs.keys()))
def __getitem__(self, index):
coco = self.coco
img_id = self.ids[index]
ann_ids = coco.getAnnIds(imgIds=img_id)
coco_annotation = coco.loadAnns(ann_ids)
path = coco.loadImgs(img_id)[0]['file_name']
img = Image.open(os.path.join(self.root, path))
num_objs = len(coco_annotation)
boxes = []
area = 0
for i in range(num_objs):
xmin = coco_annotation[i]['bbox'][0]
ymin = coco_annotation[i]['bbox'][1]
xmax = xmin + coco_annotation[i]['bbox'][2]
ymax = ymin + coco_annotation[i]['bbox'][3]
area += (xmax - xmin) * (ymax - ymin)
boxes.append([xmin, ymin, xmax, ymax])
if num_objs == 0:
boxes = torch.zeros((0, 4), dtype=torch.float32)
else:
boxes = torch.as_tensor(boxes, dtype=torch.float32)
labels = torch.ones((num_objs,), dtype=torch.int64)
img_id = torch.tensor([img_id])
areas = []
for i in range(num_objs):
areas.append(coco_annotation[i]['area'])
area = torch.as_tensor(area, dtype=torch.float32)
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
my_annotation = {}
my_annotation['boxes'] = boxes
my_annotation['labels'] = labels
my_annotation['image_id'] = img_id
my_annotation['area'] = area
my_annotation['iscrowd'] = iscrowd
if self.transforms is not None:
img = self.transforms(img)
return (img, my_annotation)
def __len__(self):
return len(self.ids)
def get_transform():
custom_transforms = []
custom_transforms.append(torchvision.transforms.ToTensor())
return torchvision.transforms.Compose(custom_transforms)
def collate_fn(batch):
return tuple(zip(*batch))
def get_model_instance_segmentation(num_classes):
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True, progress=True, pretrained_backbone=True)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
return model
train_batch_size = 1
train_shuffle_dl = True
num_workers_dl = 4
num_classes = 2
num_epochs = 2
lr = 0.005
momentum = 0.9
weight_decay = 0.005
import torch
my_dataset = myOwnDataset(root=path2data, annotation=path2json, transforms=get_transform())
data_loader = torch.utils.data.DataLoader(my_dataset, batch_size=train_batch_size, shuffle=train_shuffle_dl, num_workers=num_workers_dl, collate_fn=collate_fn)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
for imgs, annotations in data_loader:
imgs = list((img.to(device) for img in imgs))
annotations = [{k: v.to(device) for k, v in t.items()} for t in annotations]
model = get_model_instance_segmentation(num_classes)
model.to(device)
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=lr, momentum=momentum, weight_decay=weight_decay)
len_dataloader = len(data_loader)
for epoch in range(num_epochs):
model.train()
i = 0
for imgs, annotations in data_loader:
i += 1
imgs = list((img.to(device) for img in imgs))
annotations = [{k: v.to(device) for k, v in t.items()} for t in annotations]
loss_dict = model(imgs, annotations)
losses = sum((loss for loss in loss_dict.values()))
optimizer.zero_grad()
losses.backward()
optimizer.step()
model.eval() | code |
128029153/cell_12 | [
"text_plain_output_1.png"
] | from PIL import Image
from PIL import Image, ImageDraw
from pycocotools.coco import COCO
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
import os
import os
import torch
import torch
import torchvision
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import os
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
path2data = '/kaggle/input/levi9-hack9-2023/train'
path2json = '/kaggle/input/levi9-hack9-2023/train.json'
coco_train = dset.CocoDetection(root=path2data, annFile=path2json, transform=transforms.ToTensor())
img, target = coco_train[0]
import os
import torch
import torch.utils.data
import torchvision
from PIL import Image
from pycocotools.coco import COCO
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
class myOwnDataset(torch.utils.data.Dataset):
def __init__(self, root, annotation, transforms=None):
self.root = root
self.transforms = transforms
self.coco = COCO(annotation)
self.ids = list(sorted(self.coco.imgs.keys()))
def __getitem__(self, index):
coco = self.coco
img_id = self.ids[index]
ann_ids = coco.getAnnIds(imgIds=img_id)
coco_annotation = coco.loadAnns(ann_ids)
path = coco.loadImgs(img_id)[0]['file_name']
img = Image.open(os.path.join(self.root, path))
num_objs = len(coco_annotation)
boxes = []
area = 0
for i in range(num_objs):
xmin = coco_annotation[i]['bbox'][0]
ymin = coco_annotation[i]['bbox'][1]
xmax = xmin + coco_annotation[i]['bbox'][2]
ymax = ymin + coco_annotation[i]['bbox'][3]
area += (xmax - xmin) * (ymax - ymin)
boxes.append([xmin, ymin, xmax, ymax])
if num_objs == 0:
boxes = torch.zeros((0, 4), dtype=torch.float32)
else:
boxes = torch.as_tensor(boxes, dtype=torch.float32)
labels = torch.ones((num_objs,), dtype=torch.int64)
img_id = torch.tensor([img_id])
areas = []
for i in range(num_objs):
areas.append(coco_annotation[i]['area'])
area = torch.as_tensor(area, dtype=torch.float32)
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
my_annotation = {}
my_annotation['boxes'] = boxes
my_annotation['labels'] = labels
my_annotation['image_id'] = img_id
my_annotation['area'] = area
my_annotation['iscrowd'] = iscrowd
if self.transforms is not None:
img = self.transforms(img)
return (img, my_annotation)
def __len__(self):
return len(self.ids)
def get_transform():
custom_transforms = []
custom_transforms.append(torchvision.transforms.ToTensor())
return torchvision.transforms.Compose(custom_transforms)
def collate_fn(batch):
return tuple(zip(*batch))
def get_model_instance_segmentation(num_classes):
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True, progress=True, pretrained_backbone=True)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
return model
train_batch_size = 1
train_shuffle_dl = True
num_workers_dl = 4
num_classes = 2
num_epochs = 2
lr = 0.005
momentum = 0.9
weight_decay = 0.005
import torch
my_dataset = myOwnDataset(root=path2data, annotation=path2json, transforms=get_transform())
data_loader = torch.utils.data.DataLoader(my_dataset, batch_size=train_batch_size, shuffle=train_shuffle_dl, num_workers=num_workers_dl, collate_fn=collate_fn)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
for imgs, annotations in data_loader:
imgs = list((img.to(device) for img in imgs))
annotations = [{k: v.to(device) for k, v in t.items()} for t in annotations]
model = get_model_instance_segmentation(num_classes)
model.to(device)
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=lr, momentum=momentum, weight_decay=weight_decay)
len_dataloader = len(data_loader)
for epoch in range(num_epochs):
model.train()
i = 0
for imgs, annotations in data_loader:
i += 1
imgs = list((img.to(device) for img in imgs))
annotations = [{k: v.to(device) for k, v in t.items()} for t in annotations]
loss_dict = model(imgs, annotations)
losses = sum((loss for loss in loss_dict.values()))
optimizer.zero_grad()
losses.backward()
optimizer.step()
model.eval()
from PIL import Image, ImageDraw
sample_image_path = '/kaggle/input/levi9-hack9-2023/test/005.jpg'
sample_image = Image.open(sample_image_path)
sample_image
transformed_img = torchvision.transforms.transforms.ToTensor()(sample_image)
result = model([transformed_img.to(device)])
result | code |
128029153/cell_5 | [
"text_plain_output_1.png"
] | import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
path2data = '/kaggle/input/levi9-hack9-2023/train'
path2json = '/kaggle/input/levi9-hack9-2023/train.json'
coco_train = dset.CocoDetection(root=path2data, annFile=path2json, transform=transforms.ToTensor())
print('Number of samples: ', len(coco_train)) | code |
16121288/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
import numpy as np # linear algebra
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
from sklearn import metrics
print('MAE', metrics.mean_absolute_error(y_test, y_pred))
print('MSE', metrics.mean_squared_error(y_test, y_pred))
print('RMSE', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) | code |
16121288/cell_9 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/world-happiness-report-2019.csv')
df.isnull().sum()
corrmat=df.corr()
fig=plt.figure(figsize=(12,9))
sns.heatmap(corrmat,vmax=.8, square= True,annot=True)
plt.show()
print(df[['Country (region)', 'Healthy life\nexpectancy']].groupby('Country (region)').mean().sort_values('Healthy life\nexpectancy', ascending=False).head(10))
country_wise = df[['Country (region)', 'Healthy life\nexpectancy']].groupby('Country (region)').mean().sort_values('Healthy life\nexpectancy', ascending=False).head(50)
country_wise.plot(kind='bar', legend=False, figsize=(20, 8))
plt.show() | code |
16121288/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/world-happiness-report-2019.csv')
df.describe(include='all') | code |
16121288/cell_20 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/world-happiness-report-2019.csv')
df.isnull().sum()
corrmat=df.corr()
fig=plt.figure(figsize=(12,9))
sns.heatmap(corrmat,vmax=.8, square= True,annot=True)
plt.show()
country_wise = df[['Country (region)', 'Healthy life\nexpectancy']].groupby('Country (region)').mean().sort_values('Healthy life\nexpectancy', ascending=False).head(50)
country_wise = df[['Country (region)', 'Healthy life\nexpectancy']].groupby('Country (region)').mean().sort_values('Healthy life\nexpectancy', ascending=False).tail(50)
country_wise = df[['Country (region)', 'Healthy life\nexpectancy']]
from sklearn.linear_model import LinearRegression
# X_train/X_test/y_train/y_test are assumed to come from an earlier split cell in this notebook.
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
plt.scatter(y_test, y_pred)
plt.xlabel('Y Test')
plt.ylabel('Predicted y') | code |
16121288/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/world-happiness-report-2019.csv')
df.isnull().sum()
sns.pairplot(data=df) | code |
16121288/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/world-happiness-report-2019.csv')
df.isnull().sum()
corrmat=df.corr()
fig=plt.figure(figsize=(12,9))
sns.heatmap(corrmat,vmax=.8, square= True,annot=True)
plt.show()
country_wise = df[['Country (region)', 'Healthy life\nexpectancy']].groupby('Country (region)').mean().sort_values('Healthy life\nexpectancy', ascending=False).head(50)
country_wise = df[['Country (region)', 'Healthy life\nexpectancy']].groupby('Country (region)').mean().sort_values('Healthy life\nexpectancy', ascending=False).tail(50)
country_wise = df[['Country (region)', 'Healthy life\nexpectancy']]
country_wise.plot(kind='line', legend=False, figsize=(20, 8))
plt.show() | code |
16121288/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
print(os.listdir('../input')) | code |
16121288/cell_18 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
regressor.fit(X_train, y_train)
16121288/cell_8 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/world-happiness-report-2019.csv')
df.isnull().sum()
corrmat = df.corr()
fig = plt.figure(figsize=(12, 9))
sns.heatmap(corrmat, vmax=0.8, square=True, annot=True)
plt.show() | code |
16121288/cell_3 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/world-happiness-report-2019.csv')
df.head() | code |
16121288/cell_22 | [
"image_output_1.png"
] | import numpy as np # linear algebra
from sklearn import metrics
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
metrics.r2_score(y_test, y_pred) | code |
16121288/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/world-happiness-report-2019.csv')
df.isnull().sum()
corrmat=df.corr()
fig=plt.figure(figsize=(12,9))
sns.heatmap(corrmat,vmax=.8, square= True,annot=True)
plt.show()
country_wise = df[['Country (region)', 'Healthy life\nexpectancy']].groupby('Country (region)').mean().sort_values('Healthy life\nexpectancy', ascending=False).head(50)
country_wise = df[['Country (region)', 'Healthy life\nexpectancy']].groupby('Country (region)').mean().sort_values('Healthy life\nexpectancy', ascending=False).tail(50)
country_wise.plot(kind='bar', legend=False, figsize=(20, 8), color='red')
plt.show() | code |
16121288/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/world-happiness-report-2019.csv')
df.isnull().sum()
corrmat=df.corr()
fig=plt.figure(figsize=(12,9))
sns.heatmap(corrmat,vmax=.8, square= True,annot=True)
plt.show()
sns.scatterplot(x='Ladder', y='Healthy life\nexpectancy', data=df) | code |
16121288/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/world-happiness-report-2019.csv')
df.isnull().sum() | code |
18127116/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas import DataFrame
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['Id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x="date", y="rider_performance", rot=0)
rider_performance_graph.set_xlabel("Date")
rider_performance_graph.set_ylabel("Rider perforamce")
horse_performance_graph = df.plot.bar(x="date", y="horse_performance", rot=0)
horse_performance_graph.set_xlabel("Date")
horse_performance_graph.set_ylabel("Horse perforamce")
avg_performance_graph = df.plot.bar(x="date", y="avg_performance", rot=0)
avg_performance_graph.set_xlabel("Date")
avg_performance_graph.set_ylabel("Average perforamce")
from sklearn.tree import DecisionTreeRegressor
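# With only 4 training rows and no depth limit, the tree memorizes the data,
# so predicting on X below simply reproduces the training targets.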
y = df.horse_performance
features = ['time', 'km', 'rider_performance']
X = df[features]
horse_performance_model = DecisionTreeRegressor(random_state=1)
horse_performance_model.fit(X, y)
predictions = horse_performance_model.predict(X)
df
y2 = df.rider_performance
features2 = ['time', 'km', 'horse_performance']
X2 = df[features2]
rider_performance_model = DecisionTreeRegressor(random_state=1)
rider_performance_model.fit(X2, y2)
predictions2 = rider_performance_model.predict(X2)
print(predictions2)
df | code |
18127116/cell_9 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from pandas import DataFrame
import pandas as pd
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['Id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x="date", y="rider_performance", rot=0)
rider_performance_graph.set_xlabel("Date")
rider_performance_graph.set_ylabel("Rider perforamce")
horse_performance_graph = df.plot.bar(x="date", y="horse_performance", rot=0)
horse_performance_graph.set_xlabel("Date")
horse_performance_graph.set_ylabel("Horse perforamce")
avg_performance_graph = df.plot.bar(x="date", y="avg_performance", rot=0)
avg_performance_graph.set_xlabel("Date")
avg_performance_graph.set_ylabel("Average perforamce")
performance_df = pd.DataFrame({'Rider performance': df['rider_performance'], 'Horse performance': df['horse_performance']})
perfrormance_graph_comparison1 = performance_df.plot.bar(rot=0) | code |
18127116/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['Id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x='date', y='time', rot=0)
time_graph.set_xlabel('Date')
time_graph.set_ylabel('Time') | code |
18127116/cell_6 | [
"text_plain_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['Id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x='date', y='rider_performance', rot=0)
rider_performance_graph.set_xlabel('Date')
rider_performance_graph.set_ylabel('Rider performance')
18127116/cell_19 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from pandas import DataFrame
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['Id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x="date", y="rider_performance", rot=0)
rider_performance_graph.set_xlabel("Date")
rider_performance_graph.set_ylabel("Rider perforamce")
horse_performance_graph = df.plot.bar(x="date", y="horse_performance", rot=0)
horse_performance_graph.set_xlabel("Date")
horse_performance_graph.set_ylabel("Horse perforamce")
avg_performance_graph = df.plot.bar(x="date", y="avg_performance", rot=0)
avg_performance_graph.set_xlabel("Date")
avg_performance_graph.set_ylabel("Average perforamce")
from sklearn.tree import DecisionTreeRegressor
y = df.horse_performance
features = ['time', 'km', 'rider_performance']
X = df[features]
horse_performance_model = DecisionTreeRegressor(random_state=1)
horse_performance_model.fit(X, y)
predictions = horse_performance_model.predict(X)
df
y2 = df.rider_performance
features2 = ['time', 'km', 'horse_performance']
X2 = df[features2]
rider_performance_model = DecisionTreeRegressor(random_state=1)
rider_performance_model.fit(X2, y2)
predictions2 = rider_performance_model.predict(X2)
df
y3 = df.km
features3 = ['time']
X3 = df[features3]
km_model = DecisionTreeRegressor(random_state=1)
km_model.fit(X3, y3)
predictions3 = km_model.predict(X3)
df
y4 = df.time
features4 = ['km']
X4 = df[features4]
time_model = DecisionTreeRegressor(random_state=1)
time_model.fit(X4, y4)
predictions4 = time_model.predict(X4)
df
y5 = df.km
features5 = ['time', 'rider_performance']
X5 = df[features5]
km_model2 = DecisionTreeRegressor(random_state=1)
km_model2.fit(X5, y5)
predictions5 = km_model2.predict(X5)
df
y6 = df.km
features6 = ['time', 'horse_performance']
X6 = df[features6]
km_model3 = DecisionTreeRegressor(random_state=1)
km_model3.fit(X6, y6)
predictions6 = km_model3.predict(X6)
print(predictions6)
print(predictions3)
df | code |
18127116/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
18127116/cell_7 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['Id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x="date", y="rider_performance", rot=0)
rider_performance_graph.set_xlabel("Date")
rider_performance_graph.set_ylabel("Rider perforamce")
horse_performance_graph = df.plot.bar(x='date', y='horse_performance', rot=0)
horse_performance_graph.set_xlabel('Date')
horse_performance_graph.set_ylabel('Horse perforamce') | code |
18127116/cell_18 | [
"image_output_1.png"
] | from pandas import DataFrame
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['Id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x="date", y="rider_performance", rot=0)
rider_performance_graph.set_xlabel("Date")
rider_performance_graph.set_ylabel("Rider perforamce")
horse_performance_graph = df.plot.bar(x="date", y="horse_performance", rot=0)
horse_performance_graph.set_xlabel("Date")
horse_performance_graph.set_ylabel("Horse perforamce")
avg_performance_graph = df.plot.bar(x="date", y="avg_performance", rot=0)
avg_performance_graph.set_xlabel("Date")
avg_performance_graph.set_ylabel("Average perforamce")
from sklearn.tree import DecisionTreeRegressor
y = df.horse_performance
features = ['time', 'km', 'rider_performance']
X = df[features]
horse_performance_model = DecisionTreeRegressor(random_state=1)
horse_performance_model.fit(X, y)
predictions = horse_performance_model.predict(X)
df
y2 = df.rider_performance
features2 = ['time', 'km', 'horse_performance']
X2 = df[features2]
rider_performance_model = DecisionTreeRegressor(random_state=1)
rider_performance_model.fit(X2, y2)
predictions2 = rider_performance_model.predict(X2)
df
y3 = df.km
features3 = ['time']
X3 = df[features3]
km_model = DecisionTreeRegressor(random_state=1)
km_model.fit(X3, y3)
predictions3 = km_model.predict(X3)
df
y4 = df.time
features4 = ['km']
X4 = df[features4]
time_model = DecisionTreeRegressor(random_state=1)
time_model.fit(X4, y4)
predictions4 = time_model.predict(X4)
df
y5 = df.km
features5 = ['time', 'rider_performance']
X5 = df[features5]
km_model2 = DecisionTreeRegressor(random_state=1)
km_model2.fit(X5, y5)
predictions5 = km_model2.predict(X5)
print(predictions5)
print(predictions3)
df | code |
18127116/cell_8 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['Id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x="date", y="rider_performance", rot=0)
rider_performance_graph.set_xlabel("Date")
rider_performance_graph.set_ylabel("Rider perforamce")
horse_performance_graph = df.plot.bar(x="date", y="horse_performance", rot=0)
horse_performance_graph.set_xlabel("Date")
horse_performance_graph.set_ylabel("Horse perforamce")
avg_performance_graph = df.plot.bar(x='date', y='avg_performance', rot=0)
avg_performance_graph.set_xlabel('Date')
avg_performance_graph.set_ylabel('Average perforamce') | code |
18127116/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas import DataFrame
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split  # needed for the split below; missing from the original imports
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['Id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x="date", y="rider_performance", rot=0)
rider_performance_graph.set_xlabel("Date")
rider_performance_graph.set_ylabel("Rider perforamce")
horse_performance_graph = df.plot.bar(x="date", y="horse_performance", rot=0)
horse_performance_graph.set_xlabel("Date")
horse_performance_graph.set_ylabel("Horse perforamce")
avg_performance_graph = df.plot.bar(x="date", y="avg_performance", rot=0)
avg_performance_graph.set_xlabel("Date")
avg_performance_graph.set_ylabel("Average perforamce")
from sklearn.tree import DecisionTreeRegressor
y = df.horse_performance
features = ['time', 'km', 'rider_performance']
X = df[features]
horse_performance_model = DecisionTreeRegressor(random_state=1)
horse_performance_model.fit(X, y)
predictions = horse_performance_model.predict(X)
df
y2 = df.rider_performance
features2 = ['time', 'km', 'horse_performance']
X2 = df[features2]
rider_performance_model = DecisionTreeRegressor(random_state=1)
rider_performance_model.fit(X2, y2)
predictions2 = rider_performance_model.predict(X2)
df
y3 = df.km
features3 = ['time']
X3 = df[features3]
km_model = DecisionTreeRegressor(random_state=1)
km_model.fit(X3, y3)
predictions3 = km_model.predict(X3)
df
train_X2, val_X2, train_y2, val_y2 = train_test_split(X3, y3, random_state=1)
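# Note: km_model was fit on all of X3 above, so these "validation" rows were
# seen during training and the MAE below is optimistic.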
val_predictions2 = km_model.predict(val_X2)
print(val_predictions2)
from sklearn.metrics import mean_absolute_error
val_mae = mean_absolute_error(val_predictions2, val_y2)
print(val_mae)
df | code |
18127116/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas import DataFrame
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['Id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x="date", y="rider_performance", rot=0)
rider_performance_graph.set_xlabel("Date")
rider_performance_graph.set_ylabel("Rider perforamce")
horse_performance_graph = df.plot.bar(x="date", y="horse_performance", rot=0)
horse_performance_graph.set_xlabel("Date")
horse_performance_graph.set_ylabel("Horse perforamce")
avg_performance_graph = df.plot.bar(x="date", y="avg_performance", rot=0)
avg_performance_graph.set_xlabel("Date")
avg_performance_graph.set_ylabel("Average perforamce")
from sklearn.tree import DecisionTreeRegressor
y = df.horse_performance
features = ['time', 'km', 'rider_performance']
X = df[features]
horse_performance_model = DecisionTreeRegressor(random_state=1)
horse_performance_model.fit(X, y)
predictions = horse_performance_model.predict(X)
df
y2 = df.rider_performance
features2 = ['time', 'km', 'horse_performance']
X2 = df[features2]
rider_performance_model = DecisionTreeRegressor(random_state=1)
rider_performance_model.fit(X2, y2)
predictions2 = rider_performance_model.predict(X2)
df
y3 = df.km
features3 = ['time']
X3 = df[features3]
km_model = DecisionTreeRegressor(random_state=1)
km_model.fit(X3, y3)
predictions3 = km_model.predict(X3)
df
y4 = df.time
features4 = ['km']
X4 = df[features4]
time_model = DecisionTreeRegressor(random_state=1)
time_model.fit(X4, y4)
predictions4 = time_model.predict(X4)
print(predictions4)
df | code |
18127116/cell_3 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['Id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df | code |
18127116/cell_17 | [
"image_output_1.png"
] | from pandas import DataFrame
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['Id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x="date", y="rider_performance", rot=0)
rider_performance_graph.set_xlabel("Date")
rider_performance_graph.set_ylabel("Rider perforamce")
horse_performance_graph = df.plot.bar(x="date", y="horse_performance", rot=0)
horse_performance_graph.set_xlabel("Date")
horse_performance_graph.set_ylabel("Horse perforamce")
avg_performance_graph = df.plot.bar(x="date", y="avg_performance", rot=0)
avg_performance_graph.set_xlabel("Date")
avg_performance_graph.set_ylabel("Average perforamce")
from sklearn.tree import DecisionTreeRegressor
y = df.horse_performance
features = ['time', 'km', 'rider_performance']
X = df[features]
horse_performance_model = DecisionTreeRegressor(random_state=1)
horse_performance_model.fit(X, y)
predictions = horse_performance_model.predict(X)
df
y2 = df.rider_performance
features2 = ['time', 'km', 'horse_performance']
X2 = df[features2]
rider_performance_model = DecisionTreeRegressor(random_state=1)
rider_performance_model.fit(X2, y2)
predictions2 = rider_performance_model.predict(X2)
df
y3 = df.km
features3 = ['time']
X3 = df[features3]
km_model = DecisionTreeRegressor(random_state=1)
km_model.fit(X3, y3)
predictions3 = km_model.predict(X3)
df
train_X2, val_X2, train_y2, val_y2 = train_test_split(X3, y3, random_state=1)
val_predictions2 = km_model.predict(val_X2)
from sklearn.metrics import mean_absolute_error
val_mae = mean_absolute_error(val_predictions2, val_y2)
df
y4 = df.time
features4 = ['km']
X4 = df[features4]
time_model = DecisionTreeRegressor(random_state=1)
time_model.fit(X4, y4)
predictions4 = time_model.predict(X4)
df
from sklearn.model_selection import train_test_split
train_X, val_X, train_y, val_y = train_test_split(X4, y4, random_state=1)
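# time_model was also fit on all of X4; with 4 rows the default 25% split
# leaves a single validation sample, so val_mae reflects one prediction.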
val_predictions = time_model.predict(val_X)
print(val_predictions)
from sklearn.metrics import mean_absolute_error
val_mae = mean_absolute_error(val_predictions, val_y)
print(val_mae) | code |
18127116/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas import DataFrame
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['Id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x="date", y="rider_performance", rot=0)
rider_performance_graph.set_xlabel("Date")
rider_performance_graph.set_ylabel("Rider perforamce")
horse_performance_graph = df.plot.bar(x="date", y="horse_performance", rot=0)
horse_performance_graph.set_xlabel("Date")
horse_performance_graph.set_ylabel("Horse perforamce")
avg_performance_graph = df.plot.bar(x="date", y="avg_performance", rot=0)
avg_performance_graph.set_xlabel("Date")
avg_performance_graph.set_ylabel("Average perforamce")
from sklearn.tree import DecisionTreeRegressor
y = df.horse_performance
features = ['time', 'km', 'rider_performance']
X = df[features]
horse_performance_model = DecisionTreeRegressor(random_state=1)
horse_performance_model.fit(X, y)
predictions = horse_performance_model.predict(X)
df
y2 = df.rider_performance
features2 = ['time', 'km', 'horse_performance']
X2 = df[features2]
rider_performance_model = DecisionTreeRegressor(random_state=1)
rider_performance_model.fit(X2, y2)
predictions2 = rider_performance_model.predict(X2)
df
y3 = df.km
features3 = ['time']
X3 = df[features3]
km_model = DecisionTreeRegressor(random_state=1)
km_model.fit(X3, y3)
predictions3 = km_model.predict(X3)
print(predictions3)
df | code |
18127116/cell_10 | [
"text_html_output_1.png"
] | from pandas import DataFrame
import pandas as pd
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['Id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x="date", y="rider_performance", rot=0)
rider_performance_graph.set_xlabel("Date")
rider_performance_graph.set_ylabel("Rider perforamce")
horse_performance_graph = df.plot.bar(x="date", y="horse_performance", rot=0)
horse_performance_graph.set_xlabel("Date")
horse_performance_graph.set_ylabel("Horse perforamce")
avg_performance_graph = df.plot.bar(x="date", y="avg_performance", rot=0)
avg_performance_graph.set_xlabel("Date")
avg_performance_graph.set_ylabel("Average perforamce")
performance_df = pd.DataFrame({'Rider performance': df["rider_performance"],
'Horse performance': df["horse_performance"]})
perfrormance_graph_comparison1 = performance_df.plot.bar(rot=0)
performance_df2 = pd.DataFrame({'Rider performance': df['rider_performance'], 'Horse performance': df['horse_performance'], 'Average performance': df['avg_performance']})
perfrormance_graph_comparison2 = performance_df2.plot.bar(rot=0) | code |
18127116/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas import DataFrame
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['Id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x="date", y="rider_performance", rot=0)
rider_performance_graph.set_xlabel("Date")
rider_performance_graph.set_ylabel("Rider perforamce")
horse_performance_graph = df.plot.bar(x="date", y="horse_performance", rot=0)
horse_performance_graph.set_xlabel("Date")
horse_performance_graph.set_ylabel("Horse perforamce")
avg_performance_graph = df.plot.bar(x="date", y="avg_performance", rot=0)
avg_performance_graph.set_xlabel("Date")
avg_performance_graph.set_ylabel("Average perforamce")
from sklearn.tree import DecisionTreeRegressor
y = df.horse_performance
features = ['time', 'km', 'rider_performance']
X = df[features]
horse_performance_model = DecisionTreeRegressor(random_state=1)
horse_performance_model.fit(X, y)
predictions = horse_performance_model.predict(X)
print(predictions)
df | code |
18127116/cell_5 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['Id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x='date', y='km', rot=0)
km_graph.set_xlabel('Date')
km_graph.set_ylabel('Km') | code |
16147946/cell_3 | [
"text_plain_output_1.png"
] | !ls ../input | code |
16147946/cell_10 | [
"text_plain_output_1.png"
] | from pathlib import Path
import pandas as pd
import pandas as pd
from pathlib import Path
input_root_path = Path('../input')
sub = pd.read_csv(input_root_path.joinpath('sample_submission.csv'))
all_zeros = sub.copy()
all_zeros['y'] = 0
all_zeros.to_csv('baseline_probe_0.0.csv', index=False)
P_bp = -59.2822
idx_1_replace_100 = all_zeros.copy()
idx_1_replace_100['y'][0] = 100
idx_1_replace_100.to_csv('probe_0001_100.csv', index=False)
P_1_100 = -59.25187
idx_1_replace_100['y'][0] = 200
idx_1_replace_100.to_csv('probe_0001_200.csv', index=False)
P_1_200 = -59.36366
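# Assuming the leaderboard metric is a negated mean squared error over S_tot rows,
# probing index 0 with value v gives P(v) = P_bp + (2*v*t_0 - v**2) / S_tot.
# The unknown target t_0 cancels in 2*P(100) - P_bp - P(200) = 20000 / S_tot: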
S_tot = 20000.0 / (2 * P_1_100 - P_bp - P_1_200)
print('Stot is : {:.5f}'.format(S_tot)) | code |
16147946/cell_12 | [
"text_plain_output_1.png"
] | from pathlib import Path
import pandas as pd
import pandas as pd
from pathlib import Path
input_root_path = Path('../input')
sub = pd.read_csv(input_root_path.joinpath('sample_submission.csv'))
all_zeros = sub.copy()
all_zeros['y'] = 0
all_zeros.to_csv('baseline_probe_0.0.csv', index=False)
P_bp = -59.2822
idx_1_replace_100 = all_zeros.copy()
idx_1_replace_100['y'][0] = 100
idx_1_replace_100.to_csv('probe_0001_100.csv', index=False)
P_1_100 = -59.25187
idx_1_replace_100['y'][0] = 200
idx_1_replace_100.to_csv('probe_0001_200.csv', index=False)
P_1_200 = -59.36366
S_tot = 20000.0 / (2 * P_1_100 - P_bp - P_1_200)
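# Inverting P(v=100) = P_bp + (200*t_idx - 10000) / S_tot recovers the true
# target t_idx of any probed index from its leaderboard score: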
def calc_y_value_any_idx(P_any_idx, p_bp=P_bp, s_tot=S_tot):
return (s_tot * (P_any_idx - p_bp) + 10000.0) / 200.0
print('y_1 is : {:.5f}'.format(calc_y_value_any_idx(P_1_100))) | code |
128018068/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
d = pd.read_excel('/kaggle/input/rice-dataset-commeo-and-osmancik/Rice_Dataset_Commeo_and_Osmancik/Rice_Cammeo_Osmancik.xlsx')
d
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
x_train = d[['Area', 'Perimeter', 'Major_Axis_Length', 'Minor_Axis_Length', 'Eccentricity', 'Convex_Area', 'Extent']]
y_train = d['Class']
k = 3
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(x_train, y_train)
x_test = np.array([[1.2, 1.0, 2.8, 1.2, 1.0, 2.8, 1.2]])
target = knn.predict(x_test)
from sklearn.model_selection import train_test_split
x_train, x_holdout, y_train, y_holdout = train_test_split(d[['Area', 'Perimeter', 'Major_Axis_Length', 'Minor_Axis_Length', 'Eccentricity', 'Convex_Area', 'Extent']], d['Class'], test_size=0.3, random_state=17)
n_list = list(range(1, 50))
feature_cols = ['Area', 'Perimeter', 'Major_Axis_Length', 'Minor_Axis_Length', 'Eccentricity', 'Convex_Area', 'Extent']

def knn_accuracy_curve(test_size):
    # Hold out `test_size` of the data, then score k-NN for every neighbor count in n_list.
    x_tr, x_ho, y_tr, y_ho = train_test_split(d[feature_cols], d['Class'], test_size=test_size, random_state=17)
    accs = []
    for n in n_list:
        knn = KNeighborsClassifier(n_neighbors=n)
        knn.fit(x_tr, y_tr)
        accs.append(accuracy_score(y_ho, knn.predict(x_ho)))
    return accs

acclist = knn_accuracy_curve(0.3)
acclist3 = knn_accuracy_curve(0.2)  # undefined in the original cell (hence its NameError); assumed to be the 0.2 split per its label
acclist2 = knn_accuracy_curve(0.1)  # likewise assumed to be the 0.1 split
plt.plot(n_list, acclist, label='Test fraction 0.3')
plt.plot(n_list, acclist3, label='Test fraction 0.2')
plt.plot(n_list, acclist2, label='Test fraction 0.1')
plt.legend()
plt.xlabel('Number of neighbors (K)')
plt.ylabel('Classification accuracy')
plt.show()
128018068/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sb
d = pd.read_excel('/kaggle/input/rice-dataset-commeo-and-osmancik/Rice_Dataset_Commeo_and_Osmancik/Rice_Cammeo_Osmancik.xlsx')
d
sb.pairplot(d) | code |
128018068/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sb
d = pd.read_excel('/kaggle/input/rice-dataset-commeo-and-osmancik/Rice_Dataset_Commeo_and_Osmancik/Rice_Cammeo_Osmancik.xlsx')
d
sb.pairplot(d, hue='Class') | code |
128018068/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd
d = pd.read_excel('/kaggle/input/rice-dataset-commeo-and-osmancik/Rice_Dataset_Commeo_and_Osmancik/Rice_Cammeo_Osmancik.xlsx')
d | code |
128018068/cell_7 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from sklearn.neighbors import KNeighborsClassifier
import numpy as np
import pandas as pd
d = pd.read_excel('/kaggle/input/rice-dataset-commeo-and-osmancik/Rice_Dataset_Commeo_and_Osmancik/Rice_Cammeo_Osmancik.xlsx')
d
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
x_train = d[['Area', 'Perimeter', 'Major_Axis_Length', 'Minor_Axis_Length', 'Eccentricity', 'Convex_Area', 'Extent']]
y_train = d['Class']
k = 3
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(x_train, y_train)
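# Smoke test with an arbitrary 7-value query point (same column order as x_train);
# the values are not a realistic rice grain, just a shape/pipeline check.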
x_test = np.array([[1.2, 1.0, 2.8, 1.2, 1.0, 2.8, 1.2]])
target = knn.predict(x_test)
print(target) | code |
128018068/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
import pandas as pd
d = pd.read_excel('/kaggle/input/rice-dataset-commeo-and-osmancik/Rice_Dataset_Commeo_and_Osmancik/Rice_Cammeo_Osmancik.xlsx')
d
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
x_train = d[['Area', 'Perimeter', 'Major_Axis_Length', 'Minor_Axis_Length', 'Eccentricity', 'Convex_Area', 'Extent']]
y_train = d['Class']
k = 3
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(x_train, y_train)
x_test = np.array([[1.2, 1.0, 2.8, 1.2, 1.0, 2.8, 1.2]])
target = knn.predict(x_test)
from sklearn.model_selection import train_test_split
x_train, x_holdout, y_train, y_holdout = train_test_split(d[['Area', 'Perimeter', 'Major_Axis_Length', 'Minor_Axis_Length', 'Eccentricity', 'Convex_Area', 'Extent']], d['Class'], test_size=0.3, random_state=17)
n_list = list(range(1, 50))
acclist = []
for n in n_list:
knn = KNeighborsClassifier(n_neighbors=n)
knn.fit(x_train, y_train)
knn_pred = knn.predict(x_holdout)
accur = accuracy_score(y_holdout, knn_pred)
print('accuracy: ', accur)
acclist.append(accur) | code |
33111788/cell_13 | [
"text_html_output_1.png"
] | from efficientnet_pytorch import EfficientNet
from fastai.vision import *  # assumed fastai v1, which exports the get_transforms/ImageList/Learner/DatasetType API used below
from pathlib import Path
from sklearn.metrics import accuracy_score, confusion_matrix, log_loss, roc_auc_score
from tqdm import tqdm
import numpy as np
import os
import pandas as pd
path_working_dir = Path().resolve()
path_input_nih = (path_working_dir / f'../input/data').resolve()
path_input_alias = (path_working_dir / f'./alias').resolve()
path_input_model = (path_working_dir / f'../input/nih-chest-xrays-trained-models').resolve()
IMAGE_SIZE = 224
BATCH_SIZE = 24
tfms = get_transforms(do_flip=False, flip_vert=False, max_rotate=20, max_zoom=1.2, max_warp=0.25, p_affine=0.7, max_lighting=0.4, p_lighting=0.5)
df_input_all = pd.read_csv(path_input_nih / 'Data_Entry_2017.csv')
df_input_all['Finding Labels'] = df_input_all['Finding Labels'].str.replace('No Finding', '')
df_list_train = pd.read_csv(path_input_nih / 'train_val_list.txt', header=None)
df_list_valid = pd.read_csv(path_input_nih / 'test_list.txt', header=None)
list_all = df_input_all['Image Index'].tolist()
list_train = df_list_train[0].tolist()
list_valid = df_list_valid[0].tolist()
list_idx_train = [True if fname in list_train else False for fname in tqdm(list_all)]
list_idx_valid = [True if fname in list_valid else False for fname in tqdm(list_all)]
list_classes = sorted([target for target in set(df_input_all['Finding Labels'].tolist()) if not '|' in target and target != ''])
df_input_train = df_input_all[list_idx_train].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_valid = df_input_all[list_idx_valid].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_merge = pd.concat([df_input_train, df_input_valid]).reset_index(drop=True)
img_list = ImageList.from_df(df_input_merge, path_input_alias, convert_mode='L')
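# Multi-hot ground-truth tensor of shape (n_valid, 14): entry (j, i) is 1 when
# class i's name appears in validation image j's 'Finding Labels' string.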
np_y_value_valid = LongTensor(np.array([[1 if list_classes[i] in label else 0 for label in df_input_valid['Finding Labels'].tolist()] for i in range(14)]).T)
list_BATCH_SIZE = [160, 112, 104, 80, 56, 40, 32, 24]
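# Per-variant batch sizes: larger EfficientNets (b0 -> b7) need smaller batches to fit in memory.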
list_np_H_value_valid = []
for x in range(8):
data = img_list.split_by_idxs(list(range(len(df_input_train))), list(range(len(df_input_train), len(df_input_train) + len(df_input_valid)))).label_from_df(cols='Finding Labels', classes=list_classes, label_delim='|').transform(tfms, size=IMAGE_SIZE).databunch(bs=list_BATCH_SIZE[x], num_workers=os.cpu_count())
bx = f'b{x}'
model = EfficientNet.from_pretrained(f'efficientnet-{bx}', num_classes=14, in_channels=1)
learn = Learner(data, model)
learn = learn.load(path_input_model / f'efficientnet-b{x}_224x224x1_epoch_30/model_unfreeze_best')
np_H_value_valid, np_H_01_valid, np_loss_valid = learn.get_preds(DatasetType.Valid, with_loss=True)
list_np_H_value_valid.append(np_H_value_valid)
np.save(f'np_H_value_valid_b{x}.npy', np.array(np_H_value_valid))
list_output_auc = []
for x in range(8):
list_output_auc.append([float(auc_roc_score(list_np_H_value_valid[x][:, i], np_y_value_valid[:, i])) for i in range(14)])
df_output_auc = pd.DataFrame(list_output_auc, columns=list_classes)
df_output_auc.index = [f'b{x}' for x in range(8)]
df_output_auc
list_output_accuracy = []
for x in range(8):
list_output_accuracy.append([float(accuracy_score(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i] >= 0.5)) for i in range(14)])
df_output_accuracy = pd.DataFrame(list_output_accuracy, columns=list_classes)
df_output_accuracy.index = [f'b{x}' for x in range(8)]
df_output_accuracy
list_output_logloss = []
for x in range(8):
list_output_logloss.append([float(log_loss(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i])) for i in range(14)])
df_output_logloss = pd.DataFrame(list_output_logloss, columns=list_classes)
df_output_logloss.index = [f'b{x}' for x in range(8)]
df_output_logloss
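# sklearn's binary confusion_matrix(...).ravel() returns (tn, fp, fn, tp),
# so index 3 below is the true-positive count per class.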
list_output_tp = []
for x in range(8):
list_output_tp.append([int(confusion_matrix(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i] >= 0.5).ravel()[3]) for i in range(14)])
df_output_tp = pd.DataFrame(list_output_tp, columns=list_classes)
df_output_tp.index = [f'b{x}' for x in range(8)]
df_output_tp | code |
33111788/cell_9 | [
"text_html_output_1.png"
] | from efficientnet_pytorch import EfficientNet
from fastai.vision import *  # assumed fastai v1, which exports the get_transforms/ImageList/Learner/DatasetType API used below
from pathlib import Path
from tqdm import tqdm
import numpy as np
import os
import pandas as pd
path_working_dir = Path().resolve()
path_input_nih = (path_working_dir / f'../input/data').resolve()
path_input_alias = (path_working_dir / f'./alias').resolve()
path_input_model = (path_working_dir / f'../input/nih-chest-xrays-trained-models').resolve()
IMAGE_SIZE = 224
BATCH_SIZE = 24
tfms = get_transforms(do_flip=False, flip_vert=False, max_rotate=20, max_zoom=1.2, max_warp=0.25, p_affine=0.7, max_lighting=0.4, p_lighting=0.5)
df_input_all = pd.read_csv(path_input_nih / 'Data_Entry_2017.csv')
df_input_all['Finding Labels'] = df_input_all['Finding Labels'].str.replace('No Finding', '')
df_list_train = pd.read_csv(path_input_nih / 'train_val_list.txt', header=None)
df_list_valid = pd.read_csv(path_input_nih / 'test_list.txt', header=None)
list_all = df_input_all['Image Index'].tolist()
list_train = df_list_train[0].tolist()
list_valid = df_list_valid[0].tolist()
list_idx_train = [True if fname in list_train else False for fname in tqdm(list_all)]
list_idx_valid = [True if fname in list_valid else False for fname in tqdm(list_all)]
list_classes = sorted([target for target in set(df_input_all['Finding Labels'].tolist()) if not '|' in target and target != ''])
df_input_train = df_input_all[list_idx_train].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_valid = df_input_all[list_idx_valid].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_merge = pd.concat([df_input_train, df_input_valid]).reset_index(drop=True)
img_list = ImageList.from_df(df_input_merge, path_input_alias, convert_mode='L')
np_y_value_valid = LongTensor(np.array([[1 if list_classes[i] in label else 0 for label in df_input_valid['Finding Labels'].tolist()] for i in range(14)]).T)
list_BATCH_SIZE = [160, 112, 104, 80, 56, 40, 32, 24]
list_np_H_value_valid = []
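# For each variant b0-b7: rebuild the DataBunch at that model's batch size, load its
# trained weights, and cache the validation-set probabilities to disk.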
for x in range(8):
data = img_list.split_by_idxs(list(range(len(df_input_train))), list(range(len(df_input_train), len(df_input_train) + len(df_input_valid)))).label_from_df(cols='Finding Labels', classes=list_classes, label_delim='|').transform(tfms, size=IMAGE_SIZE).databunch(bs=list_BATCH_SIZE[x], num_workers=os.cpu_count())
bx = f'b{x}'
model = EfficientNet.from_pretrained(f'efficientnet-{bx}', num_classes=14, in_channels=1)
learn = Learner(data, model)
learn = learn.load(path_input_model / f'efficientnet-b{x}_224x224x1_epoch_30/model_unfreeze_best')
np_H_value_valid, np_H_01_valid, np_loss_valid = learn.get_preds(DatasetType.Valid, with_loss=True)
list_np_H_value_valid.append(np_H_value_valid)
np.save(f'np_H_value_valid_b{x}.npy', np.array(np_H_value_valid)) | code |
33111788/cell_11 | [
"application_vnd.jupyter.stderr_output_9.png",
"application_vnd.jupyter.stderr_output_7.png",
"application_vnd.jupyter.stderr_output_11.png",
"text_plain_output_4.png",
"text_plain_output_14.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"application_vnd.jupyter.stderr_output_13.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_5.png",
"text_plain_output_16.png",
"application_vnd.jupyter.stderr_output_15.png",
"text_plain_output_8.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"text_plain_output_12.png"
] | from efficientnet_pytorch import EfficientNet
from fastai.vision import *  # assumed fastai v1, which exports the get_transforms/ImageList/Learner/DatasetType API used below
from pathlib import Path
from sklearn.metrics import accuracy_score, confusion_matrix, log_loss, roc_auc_score
from tqdm import tqdm
import numpy as np
import os
import pandas as pd
path_working_dir = Path().resolve()
path_input_nih = (path_working_dir / f'../input/data').resolve()
path_input_alias = (path_working_dir / f'./alias').resolve()
path_input_model = (path_working_dir / f'../input/nih-chest-xrays-trained-models').resolve()
IMAGE_SIZE = 224
BATCH_SIZE = 24
tfms = get_transforms(do_flip=False, flip_vert=False, max_rotate=20, max_zoom=1.2, max_warp=0.25, p_affine=0.7, max_lighting=0.4, p_lighting=0.5)
df_input_all = pd.read_csv(path_input_nih / 'Data_Entry_2017.csv')
df_input_all['Finding Labels'] = df_input_all['Finding Labels'].str.replace('No Finding', '')
df_list_train = pd.read_csv(path_input_nih / 'train_val_list.txt', header=None)
df_list_valid = pd.read_csv(path_input_nih / 'test_list.txt', header=None)
list_all = df_input_all['Image Index'].tolist()
list_train = df_list_train[0].tolist()
list_valid = df_list_valid[0].tolist()
list_idx_train = [True if fname in list_train else False for fname in tqdm(list_all)]
list_idx_valid = [True if fname in list_valid else False for fname in tqdm(list_all)]
list_classes = sorted([target for target in set(df_input_all['Finding Labels'].tolist()) if not '|' in target and target != ''])
df_input_train = df_input_all[list_idx_train].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_valid = df_input_all[list_idx_valid].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_merge = pd.concat([df_input_train, df_input_valid]).reset_index(drop=True)
img_list = ImageList.from_df(df_input_merge, path_input_alias, convert_mode='L')
np_y_value_valid = LongTensor(np.array([[1 if list_classes[i] in label else 0 for label in df_input_valid['Finding Labels'].tolist()] for i in range(14)]).T)
list_BATCH_SIZE = [160, 112, 104, 80, 56, 40, 32, 24]
list_np_H_value_valid = []
for x in range(8):
data = img_list.split_by_idxs(list(range(len(df_input_train))), list(range(len(df_input_train), len(df_input_train) + len(df_input_valid)))).label_from_df(cols='Finding Labels', classes=list_classes, label_delim='|').transform(tfms, size=IMAGE_SIZE).databunch(bs=list_BATCH_SIZE[x], num_workers=os.cpu_count())
bx = f'b{x}'
model = EfficientNet.from_pretrained(f'efficientnet-{bx}', num_classes=14, in_channels=1)
learn = Learner(data, model)
learn = learn.load(path_input_model / f'efficientnet-b{x}_224x224x1_epoch_30/model_unfreeze_best')
np_H_value_valid, np_H_01_valid, np_loss_valid = learn.get_preds(DatasetType.Valid, with_loss=True)
list_np_H_value_valid.append(np_H_value_valid)
np.save(f'np_H_value_valid_b{x}.npy', np.array(np_H_value_valid))
list_output_auc = []
for x in range(8):
list_output_auc.append([float(auc_roc_score(list_np_H_value_valid[x][:, i], np_y_value_valid[:, i])) for i in range(14)])
df_output_auc = pd.DataFrame(list_output_auc, columns=list_classes)
df_output_auc.index = [f'b{x}' for x in range(8)]
df_output_auc
list_output_accuracy = []
for x in range(8):
list_output_accuracy.append([float(accuracy_score(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i] >= 0.5)) for i in range(14)])
df_output_accuracy = pd.DataFrame(list_output_accuracy, columns=list_classes)
df_output_accuracy.index = [f'b{x}' for x in range(8)]
df_output_accuracy | code |
33111788/cell_1 | [
"text_plain_output_1.png"
] | !pip install efficientnet_pytorch | code |
33111788/cell_7 | [
"text_html_output_1.png"
] | from pathlib import Path
from tqdm import tqdm
import pandas as pd
path_working_dir = Path().resolve()
path_input_nih = (path_working_dir / f'../input/data').resolve()
path_input_alias = (path_working_dir / f'./alias').resolve()
path_input_model = (path_working_dir / f'../input/nih-chest-xrays-trained-models').resolve()
df_input_all = pd.read_csv(path_input_nih / 'Data_Entry_2017.csv')
df_input_all['Finding Labels'] = df_input_all['Finding Labels'].str.replace('No Finding', '')
df_list_train = pd.read_csv(path_input_nih / 'train_val_list.txt', header=None)
df_list_valid = pd.read_csv(path_input_nih / 'test_list.txt', header=None)
list_all = df_input_all['Image Index'].tolist()
list_train = df_list_train[0].tolist()
list_valid = df_list_valid[0].tolist()
list_idx_train = [True if fname in list_train else False for fname in tqdm(list_all)]
list_idx_valid = [True if fname in list_valid else False for fname in tqdm(list_all)]
list_classes = sorted([target for target in set(df_input_all['Finding Labels'].tolist()) if not '|' in target and target != ''])
df_input_train = df_input_all[list_idx_train].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_valid = df_input_all[list_idx_valid].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_merge = pd.concat([df_input_train, df_input_valid]).reset_index(drop=True) | code |
33111788/cell_15 | [
"text_html_output_1.png"
] | from efficientnet_pytorch import EfficientNet
from fastai.vision import *  # assumed fastai v1, which exports the get_transforms/ImageList/Learner/DatasetType API used below
from pathlib import Path
from sklearn.metrics import accuracy_score, confusion_matrix, log_loss, roc_auc_score
from tqdm import tqdm
import numpy as np
import os
import pandas as pd
path_working_dir = Path().resolve()
path_input_nih = (path_working_dir / f'../input/data').resolve()
path_input_alias = (path_working_dir / f'./alias').resolve()
path_input_model = (path_working_dir / f'../input/nih-chest-xrays-trained-models').resolve()
IMAGE_SIZE = 224
BATCH_SIZE = 24
tfms = get_transforms(do_flip=False, flip_vert=False, max_rotate=20, max_zoom=1.2, max_warp=0.25, p_affine=0.7, max_lighting=0.4, p_lighting=0.5)
df_input_all = pd.read_csv(path_input_nih / 'Data_Entry_2017.csv')
df_input_all['Finding Labels'] = df_input_all['Finding Labels'].str.replace('No Finding', '')
df_list_train = pd.read_csv(path_input_nih / 'train_val_list.txt', header=None)
df_list_valid = pd.read_csv(path_input_nih / 'test_list.txt', header=None)
list_all = df_input_all['Image Index'].tolist()
list_train = df_list_train[0].tolist()
list_valid = df_list_valid[0].tolist()
list_idx_train = [True if fname in list_train else False for fname in tqdm(list_all)]
list_idx_valid = [True if fname in list_valid else False for fname in tqdm(list_all)]
list_classes = sorted([target for target in set(df_input_all['Finding Labels'].tolist()) if not '|' in target and target != ''])
df_input_train = df_input_all[list_idx_train].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_valid = df_input_all[list_idx_valid].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_merge = pd.concat([df_input_train, df_input_valid]).reset_index(drop=True)
img_list = ImageList.from_df(df_input_merge, path_input_alias, convert_mode='L')
np_y_value_valid = LongTensor(np.array([[1 if list_classes[i] in label else 0 for label in df_input_valid['Finding Labels'].tolist()] for i in range(14)]).T)
list_BATCH_SIZE = [160, 112, 104, 80, 56, 40, 32, 24]
list_np_H_value_valid = []
for x in range(8):
data = img_list.split_by_idxs(list(range(len(df_input_train))), list(range(len(df_input_train), len(df_input_train) + len(df_input_valid)))).label_from_df(cols='Finding Labels', classes=list_classes, label_delim='|').transform(tfms, size=IMAGE_SIZE).databunch(bs=list_BATCH_SIZE[x], num_workers=os.cpu_count())
bx = f'b{x}'
model = EfficientNet.from_pretrained(f'efficientnet-{bx}', num_classes=14, in_channels=1)
learn = Learner(data, model)
learn = learn.load(path_input_model / f'efficientnet-b{x}_224x224x1_epoch_30/model_unfreeze_best')
np_H_value_valid, np_H_01_valid, np_loss_valid = learn.get_preds(DatasetType.Valid, with_loss=True)
list_np_H_value_valid.append(np_H_value_valid)
np.save(f'np_H_value_valid_b{x}.npy', np.array(np_H_value_valid))
list_output_auc = []
for x in range(8):
list_output_auc.append([float(auc_roc_score(list_np_H_value_valid[x][:, i], np_y_value_valid[:, i])) for i in range(14)])
df_output_auc = pd.DataFrame(list_output_auc, columns=list_classes)
df_output_auc.index = [f'b{x}' for x in range(8)]
df_output_auc
list_output_accuracy = []
for x in range(8):
list_output_accuracy.append([float(accuracy_score(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i] >= 0.5)) for i in range(14)])
df_output_accuracy = pd.DataFrame(list_output_accuracy, columns=list_classes)
df_output_accuracy.index = [f'b{x}' for x in range(8)]
df_output_accuracy
list_output_logloss = []
for x in range(8):
list_output_logloss.append([float(log_loss(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i])) for i in range(14)])
df_output_logloss = pd.DataFrame(list_output_logloss, columns=list_classes)
df_output_logloss.index = [f'b{x}' for x in range(8)]
df_output_logloss
list_output_tp = []
for x in range(8):
list_output_tp.append([int(confusion_matrix(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i] >= 0.5).ravel()[3]) for i in range(14)])
df_output_tp = pd.DataFrame(list_output_tp, columns=list_classes)
df_output_tp.index = [f'b{x}' for x in range(8)]
df_output_tp
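# Same ravel() order (tn, fp, fn, tp): index 1 gives false positives, index 2 false negatives.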
list_output_fp = []
for x in range(8):
list_output_fp.append([int(confusion_matrix(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i] >= 0.5).ravel()[1]) for i in range(14)])
df_output_fp = pd.DataFrame(list_output_fp, columns=list_classes)
df_output_fp.index = [f'b{x}' for x in range(8)]
df_output_fp
list_output_fn = []
for x in range(8):
list_output_fn.append([int(confusion_matrix(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i] >= 0.5).ravel()[2]) for i in range(14)])
df_output_fn = pd.DataFrame(list_output_fn, columns=list_classes)
df_output_fn.index = [f'b{x}' for x in range(8)]
df_output_fn | code |
33111788/cell_16 | [
"text_html_output_1.png"
] | from efficientnet_pytorch import EfficientNet
from fastai.vision import *  # assumed fastai v1, which exports the get_transforms/ImageList/Learner/DatasetType API used below
from pathlib import Path
from sklearn.metrics import accuracy_score, confusion_matrix, log_loss, roc_auc_score
from tqdm import tqdm
import numpy as np
import os
import pandas as pd
path_working_dir = Path().resolve()
path_input_nih = (path_working_dir / f'../input/data').resolve()
path_input_alias = (path_working_dir / f'./alias').resolve()
path_input_model = (path_working_dir / f'../input/nih-chest-xrays-trained-models').resolve()
IMAGE_SIZE = 224
BATCH_SIZE = 24
tfms = get_transforms(do_flip=False, flip_vert=False, max_rotate=20, max_zoom=1.2, max_warp=0.25, p_affine=0.7, max_lighting=0.4, p_lighting=0.5)
df_input_all = pd.read_csv(path_input_nih / 'Data_Entry_2017.csv')
df_input_all['Finding Labels'] = df_input_all['Finding Labels'].str.replace('No Finding', '')
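# The official NIH split lists image filenames for train/val and test (the test list serves as the validation set here)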
df_list_train = pd.read_csv(path_input_nih / 'train_val_list.txt', header=None)
df_list_valid = pd.read_csv(path_input_nih / 'test_list.txt', header=None)
list_all = df_input_all['Image Index'].tolist()
list_train = df_list_train[0].tolist()
list_valid = df_list_valid[0].tolist()
list_idx_train = [True if fname in list_train else False for fname in tqdm(list_all)]
list_idx_valid = [True if fname in list_valid else False for fname in tqdm(list_all)]
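# The 14 single-pathology class names; multi-label rows join them with '|'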
list_classes = sorted([target for target in set(df_input_all['Finding Labels'].tolist()) if not '|' in target and target != ''])
df_input_train = df_input_all[list_idx_train].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_valid = df_input_all[list_idx_valid].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_merge = pd.concat([df_input_train, df_input_valid]).reset_index(drop=True)
img_list = ImageList.from_df(df_input_merge, path_input_alias, convert_mode='L')
np_y_value_valid = LongTensor(np.array([[1 if list_classes[i] in label else 0 for label in df_input_valid['Finding Labels'].tolist()] for i in range(14)]).T)
list_BATCH_SIZE = [160, 112, 104, 80, 56, 40, 32, 24]
list_np_H_value_valid = []
for x in range(8):
data = img_list.split_by_idxs(list(range(len(df_input_train))), list(range(len(df_input_train), len(df_input_train) + len(df_input_valid)))).label_from_df(cols='Finding Labels', classes=list_classes, label_delim='|').transform(tfms, size=IMAGE_SIZE).databunch(bs=list_BATCH_SIZE[x], num_workers=os.cpu_count())
bx = f'b{x}'
model = EfficientNet.from_pretrained(f'efficientnet-{bx}', num_classes=14, in_channels=1)
learn = Learner(data, model)
learn = learn.load(path_input_model / f'efficientnet-b{x}_224x224x1_epoch_30/model_unfreeze_best')
np_H_value_valid, np_H_01_valid, np_loss_valid = learn.get_preds(DatasetType.Valid, with_loss=True)
list_np_H_value_valid.append(np_H_value_valid)
np.save(f'np_H_value_valid_b{x}.npy', np.array(np_H_value_valid))
list_output_auc = []
for x in range(8):
list_output_auc.append([float(auc_roc_score(list_np_H_value_valid[x][:, i], np_y_value_valid[:, i])) for i in range(14)])
df_output_auc = pd.DataFrame(list_output_auc, columns=list_classes)
df_output_auc.index = [f'b{x}' for x in range(8)]
df_output_auc
list_output_accuracy = []
for x in range(8):
list_output_accuracy.append([float(accuracy_score(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i] >= 0.5)) for i in range(14)])
df_output_accuracy = pd.DataFrame(list_output_accuracy, columns=list_classes)
df_output_accuracy.index = [f'b{x}' for x in range(8)]
df_output_accuracy
list_output_logloss = []
for x in range(8):
list_output_logloss.append([float(log_loss(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i])) for i in range(14)])
df_output_logloss = pd.DataFrame(list_output_logloss, columns=list_classes)
df_output_logloss.index = [f'b{x}' for x in range(8)]
df_output_logloss
list_output_tp = []
for x in range(8):
list_output_tp.append([int(confusion_matrix(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i] >= 0.5).ravel()[3]) for i in range(14)])
df_output_tp = pd.DataFrame(list_output_tp, columns=list_classes)
df_output_tp.index = [f'b{x}' for x in range(8)]
df_output_tp
list_output_fp = []
for x in range(8):
list_output_fp.append([int(confusion_matrix(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i] >= 0.5).ravel()[1]) for i in range(14)])
df_output_fp = pd.DataFrame(list_output_fp, columns=list_classes)
df_output_fp.index = [f'b{x}' for x in range(8)]
df_output_fp
list_output_fn = []
for x in range(8):
list_output_fn.append([int(confusion_matrix(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i] >= 0.5).ravel()[2]) for i in range(14)])
df_output_fn = pd.DataFrame(list_output_fn, columns=list_classes)
df_output_fn.index = [f'b{x}' for x in range(8)]
df_output_fn
list_output_tn = []
for x in range(8):
list_output_tn.append([int(confusion_matrix(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i] >= 0.5).ravel()[0]) for i in range(14)])
df_output_tn = pd.DataFrame(list_output_tn, columns=list_classes)
df_output_tn.index = [f'b{x}' for x in range(8)]
df_output_tn | code |
33111788/cell_14 | [
"text_html_output_1.png"
] | from efficientnet_pytorch import EfficientNet
from fastai.vision import *  # assumed fastai v1: provides get_transforms, ImageList, Learner, DatasetType, LongTensor, auc_roc_score
from pathlib import Path
from sklearn.metrics import accuracy_score, confusion_matrix, log_loss, roc_auc_score
from tqdm import tqdm
import numpy as np
import os
import pandas as pd
path_working_dir = Path().resolve()
path_input_nih = (path_working_dir / '../input/data').resolve()
path_input_alias = (path_working_dir / './alias').resolve()
path_input_model = (path_working_dir / '../input/nih-chest-xrays-trained-models').resolve()
IMAGE_SIZE = 224
BATCH_SIZE = 24
tfms = get_transforms(do_flip=False, flip_vert=False, max_rotate=20, max_zoom=1.2, max_warp=0.25, p_affine=0.7, max_lighting=0.4, p_lighting=0.5)
df_input_all = pd.read_csv(path_input_nih / 'Data_Entry_2017.csv')
df_input_all['Finding Labels'] = df_input_all['Finding Labels'].str.replace('No Finding', '')
df_list_train = pd.read_csv(path_input_nih / 'train_val_list.txt', header=None)
df_list_valid = pd.read_csv(path_input_nih / 'test_list.txt', header=None)
list_all = df_input_all['Image Index'].tolist()
list_train = df_list_train[0].tolist()
list_valid = df_list_valid[0].tolist()
list_idx_train = [True if fname in list_train else False for fname in tqdm(list_all)]
list_idx_valid = [True if fname in list_valid else False for fname in tqdm(list_all)]
list_classes = sorted([target for target in set(df_input_all['Finding Labels'].tolist()) if not '|' in target and target != ''])
df_input_train = df_input_all[list_idx_train].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_valid = df_input_all[list_idx_valid].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_merge = pd.concat([df_input_train, df_input_valid]).reset_index(drop=True)
img_list = ImageList.from_df(df_input_merge, path_input_alias, convert_mode='L')
np_y_value_valid = LongTensor(np.array([[1 if list_classes[i] in label else 0 for label in df_input_valid['Finding Labels'].tolist()] for i in range(14)]).T)
list_BATCH_SIZE = [160, 112, 104, 80, 56, 40, 32, 24]
list_np_H_value_valid = []
for x in range(8):
data = img_list.split_by_idxs(list(range(len(df_input_train))), list(range(len(df_input_train), len(df_input_train) + len(df_input_valid)))).label_from_df(cols='Finding Labels', classes=list_classes, label_delim='|').transform(tfms, size=IMAGE_SIZE).databunch(bs=list_BATCH_SIZE[x], num_workers=os.cpu_count())
bx = f'b{x}'
model = EfficientNet.from_pretrained(f'efficientnet-{bx}', num_classes=14, in_channels=1)
learn = Learner(data, model)
learn = learn.load(path_input_model / f'efficientnet-b{x}_224x224x1_epoch_30/model_unfreeze_best')
np_H_value_valid, np_H_01_valid, np_loss_valid = learn.get_preds(DatasetType.Valid, with_loss=True)
list_np_H_value_valid.append(np_H_value_valid)
np.save(f'np_H_value_valid_b{x}.npy', np.array(np_H_value_valid))
list_output_auc = []
for x in range(8):
list_output_auc.append([float(auc_roc_score(list_np_H_value_valid[x][:, i], np_y_value_valid[:, i])) for i in range(14)])
df_output_auc = pd.DataFrame(list_output_auc, columns=list_classes)
df_output_auc.index = [f'b{x}' for x in range(8)]
df_output_auc
list_output_accuracy = []
for x in range(8):
list_output_accuracy.append([float(accuracy_score(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i] >= 0.5)) for i in range(14)])
df_output_accuracy = pd.DataFrame(list_output_accuracy, columns=list_classes)
df_output_accuracy.index = [f'b{x}' for x in range(8)]
df_output_accuracy
list_output_logloss = []
for x in range(8):
list_output_logloss.append([float(log_loss(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i])) for i in range(14)])
df_output_logloss = pd.DataFrame(list_output_logloss, columns=list_classes)
df_output_logloss.index = [f'b{x}' for x in range(8)]
df_output_logloss
list_output_tp = []
for x in range(8):
list_output_tp.append([int(confusion_matrix(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i] >= 0.5).ravel()[3]) for i in range(14)])
df_output_tp = pd.DataFrame(list_output_tp, columns=list_classes)
df_output_tp.index = [f'b{x}' for x in range(8)]
df_output_tp
list_output_fp = []
for x in range(8):
list_output_fp.append([int(confusion_matrix(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i] >= 0.5).ravel()[1]) for i in range(14)])
df_output_fp = pd.DataFrame(list_output_fp, columns=list_classes)
df_output_fp.index = [f'b{x}' for x in range(8)]
df_output_fp | code |
33111788/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from efficientnet_pytorch import EfficientNet
from fastai.vision import *  # assumed fastai v1: provides get_transforms, ImageList, Learner, DatasetType, LongTensor, auc_roc_score
from pathlib import Path
from tqdm import tqdm
import numpy as np
import os
import pandas as pd
path_working_dir = Path().resolve()
path_input_nih = (path_working_dir / '../input/data').resolve()
path_input_alias = (path_working_dir / './alias').resolve()
path_input_model = (path_working_dir / '../input/nih-chest-xrays-trained-models').resolve()
IMAGE_SIZE = 224
BATCH_SIZE = 24
tfms = get_transforms(do_flip=False, flip_vert=False, max_rotate=20, max_zoom=1.2, max_warp=0.25, p_affine=0.7, max_lighting=0.4, p_lighting=0.5)
df_input_all = pd.read_csv(path_input_nih / 'Data_Entry_2017.csv')
df_input_all['Finding Labels'] = df_input_all['Finding Labels'].str.replace('No Finding', '')
df_list_train = pd.read_csv(path_input_nih / 'train_val_list.txt', header=None)
df_list_valid = pd.read_csv(path_input_nih / 'test_list.txt', header=None)
list_all = df_input_all['Image Index'].tolist()
list_train = df_list_train[0].tolist()
list_valid = df_list_valid[0].tolist()
list_idx_train = [True if fname in list_train else False for fname in tqdm(list_all)]
list_idx_valid = [True if fname in list_valid else False for fname in tqdm(list_all)]
list_classes = sorted([target for target in set(df_input_all['Finding Labels'].tolist()) if not '|' in target and target != ''])
df_input_train = df_input_all[list_idx_train].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_valid = df_input_all[list_idx_valid].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_merge = pd.concat([df_input_train, df_input_valid]).reset_index(drop=True)
img_list = ImageList.from_df(df_input_merge, path_input_alias, convert_mode='L')
np_y_value_valid = LongTensor(np.array([[1 if list_classes[i] in label else 0 for label in df_input_valid['Finding Labels'].tolist()] for i in range(14)]).T)
list_BATCH_SIZE = [160, 112, 104, 80, 56, 40, 32, 24]
list_np_H_value_valid = []
for x in range(8):
data = img_list.split_by_idxs(list(range(len(df_input_train))), list(range(len(df_input_train), len(df_input_train) + len(df_input_valid)))).label_from_df(cols='Finding Labels', classes=list_classes, label_delim='|').transform(tfms, size=IMAGE_SIZE).databunch(bs=list_BATCH_SIZE[x], num_workers=os.cpu_count())
bx = f'b{x}'
model = EfficientNet.from_pretrained(f'efficientnet-{bx}', num_classes=14, in_channels=1)
learn = Learner(data, model)
learn = learn.load(path_input_model / f'efficientnet-b{x}_224x224x1_epoch_30/model_unfreeze_best')
np_H_value_valid, np_H_01_valid, np_loss_valid = learn.get_preds(DatasetType.Valid, with_loss=True)
list_np_H_value_valid.append(np_H_value_valid)
np.save(f'np_H_value_valid_b{x}.npy', np.array(np_H_value_valid))
list_output_auc = []
for x in range(8):
list_output_auc.append([float(auc_roc_score(list_np_H_value_valid[x][:, i], np_y_value_valid[:, i])) for i in range(14)])
df_output_auc = pd.DataFrame(list_output_auc, columns=list_classes)
df_output_auc.index = [f'b{x}' for x in range(8)]
df_output_auc | code |
33111788/cell_12 | [
"text_html_output_1.png"
] | from efficientnet_pytorch import EfficientNet
from fastai.vision import *  # assumed fastai v1: provides get_transforms, ImageList, Learner, DatasetType, LongTensor, auc_roc_score
from pathlib import Path
from sklearn.metrics import accuracy_score, confusion_matrix, log_loss, roc_auc_score
from tqdm import tqdm
import numpy as np
import os
import pandas as pd
path_working_dir = Path().resolve()
path_input_nih = (path_working_dir / '../input/data').resolve()
path_input_alias = (path_working_dir / './alias').resolve()
path_input_model = (path_working_dir / '../input/nih-chest-xrays-trained-models').resolve()
IMAGE_SIZE = 224
BATCH_SIZE = 24
tfms = get_transforms(do_flip=False, flip_vert=False, max_rotate=20, max_zoom=1.2, max_warp=0.25, p_affine=0.7, max_lighting=0.4, p_lighting=0.5)
df_input_all = pd.read_csv(path_input_nih / 'Data_Entry_2017.csv')
df_input_all['Finding Labels'] = df_input_all['Finding Labels'].str.replace('No Finding', '')
df_list_train = pd.read_csv(path_input_nih / 'train_val_list.txt', header=None)
df_list_valid = pd.read_csv(path_input_nih / 'test_list.txt', header=None)
list_all = df_input_all['Image Index'].tolist()
list_train = df_list_train[0].tolist()
list_valid = df_list_valid[0].tolist()
list_idx_train = [True if fname in list_train else False for fname in tqdm(list_all)]
list_idx_valid = [True if fname in list_valid else False for fname in tqdm(list_all)]
list_classes = sorted([target for target in set(df_input_all['Finding Labels'].tolist()) if not '|' in target and target != ''])
df_input_train = df_input_all[list_idx_train].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_valid = df_input_all[list_idx_valid].reset_index(drop=True)[['Image Index', 'Finding Labels']]
df_input_merge = pd.concat([df_input_train, df_input_valid]).reset_index(drop=True)
img_list = ImageList.from_df(df_input_merge, path_input_alias, convert_mode='L')
np_y_value_valid = LongTensor(np.array([[1 if list_classes[i] in label else 0 for label in df_input_valid['Finding Labels'].tolist()] for i in range(14)]).T)
list_BATCH_SIZE = [160, 112, 104, 80, 56, 40, 32, 24]
list_np_H_value_valid = []
for x in range(8):
data = img_list.split_by_idxs(list(range(len(df_input_train))), list(range(len(df_input_train), len(df_input_train) + len(df_input_valid)))).label_from_df(cols='Finding Labels', classes=list_classes, label_delim='|').transform(tfms, size=IMAGE_SIZE).databunch(bs=list_BATCH_SIZE[x], num_workers=os.cpu_count())
bx = f'b{x}'
model = EfficientNet.from_pretrained(f'efficientnet-{bx}', num_classes=14, in_channels=1)
learn = Learner(data, model)
learn = learn.load(path_input_model / f'efficientnet-b{x}_224x224x1_epoch_30/model_unfreeze_best')
np_H_value_valid, np_H_01_valid, np_loss_valid = learn.get_preds(DatasetType.Valid, with_loss=True)
list_np_H_value_valid.append(np_H_value_valid)
np.save(f'np_H_value_valid_b{x}.npy', np.array(np_H_value_valid))
list_output_auc = []
for x in range(8):
list_output_auc.append([float(auc_roc_score(list_np_H_value_valid[x][:, i], np_y_value_valid[:, i])) for i in range(14)])
df_output_auc = pd.DataFrame(list_output_auc, columns=list_classes)
df_output_auc.index = [f'b{x}' for x in range(8)]
df_output_auc
list_output_accuracy = []
for x in range(8):
list_output_accuracy.append([float(accuracy_score(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i] >= 0.5)) for i in range(14)])
df_output_accuracy = pd.DataFrame(list_output_accuracy, columns=list_classes)
df_output_accuracy.index = [f'b{x}' for x in range(8)]
df_output_accuracy
list_output_logloss = []
for x in range(8):
list_output_logloss.append([float(log_loss(np_y_value_valid[:, i], list_np_H_value_valid[x][:, i])) for i in range(14)])
df_output_logloss = pd.DataFrame(list_output_logloss, columns=list_classes)
df_output_logloss.index = [f'b{x}' for x in range(8)]
df_output_logloss | code |
89135215/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
house_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
house_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
Numlist1 = ['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageQual', 'GarageCond']
Numlist2 = ['BsmtExposure']
Numlist3 = ['BsmtFinType1', 'BsmtFinType2']
Numlist4 = ['PoolQC']
Numlist5 = ['Fence']
Numlist6 = ['ExterQual', 'ExterCond', 'HeatingQC', 'KitchenQual']
Numlist7 = ['LotShape']
Numlist8 = ['LandSlope']
Numlist9 = ['Functional']
Numlist10 = ['GarageFinish']
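# Ordinal encodings for quality/exposure/finish categories; most maps send NaN (feature absent) to 0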
def numeric_map1(x):
return x.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5, np.nan: 0})
def numeric_map2(y):
return y.map({'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4, np.nan: 0})
def numeric_map3(z):
return z.map({'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6, np.nan: 0})
def numeric_map4(a):
return a.map({'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4, np.nan: 0})
def numeric_map5(b):
return b.map({'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4, np.nan: 0})
def numeric_map6(c):
return c.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5})
def numeric_map7(d):
return d.map({'IR3': 1, 'IR2': 2, 'IR1': 3, 'Reg': 4})
def numeric_map8(e):
return e.map({'Sev': 1, 'Mod': 2, 'Gtl': 3})
def numeric_map9(f):
return f.map({'Sal': 1, 'Sev': 2, 'Maj2': 3, 'Maj1': 4, 'Mod': 5, 'Min2': 6, 'Min1': 7, 'Typ': 8})
def numeric_map10(g):
return g.map({'Unf': 1, 'RFn': 2, 'Fin': 3, np.nan: 0})
house_train[Numlist1] = house_train[Numlist1].apply(numeric_map1)
house_train[Numlist2] = house_train[Numlist2].apply(numeric_map2)
house_train[Numlist3] = house_train[Numlist3].apply(numeric_map3)
house_train[Numlist4] = house_train[Numlist4].apply(numeric_map4)
house_train[Numlist5] = house_train[Numlist5].apply(numeric_map5)
house_train[Numlist6] = house_train[Numlist6].apply(numeric_map6)
house_train[Numlist7] = house_train[Numlist7].apply(numeric_map7)
house_train[Numlist8] = house_train[Numlist8].apply(numeric_map8)
house_train[Numlist9] = house_train[Numlist9].apply(numeric_map9)
house_train[Numlist10] = house_train[Numlist10].apply(numeric_map10)
house_test[Numlist1] = house_test[Numlist1].apply(numeric_map1)
house_test[Numlist2] = house_test[Numlist2].apply(numeric_map2)
house_test[Numlist3] = house_test[Numlist3].apply(numeric_map3)
house_test[Numlist4] = house_test[Numlist4].apply(numeric_map4)
house_test[Numlist5] = house_test[Numlist5].apply(numeric_map5)
house_test[Numlist6] = house_test[Numlist6].apply(numeric_map6)
house_test[Numlist7] = house_test[Numlist7].apply(numeric_map7)
house_test[Numlist8] = house_test[Numlist8].apply(numeric_map8)
house_test[Numlist9] = house_test[Numlist9].apply(numeric_map9)
house_test[Numlist10] = house_test[Numlist10].apply(numeric_map10)
train = house_train.select_dtypes(exclude=['object'])
test = house_test.select_dtypes(exclude=['object'])
corr = train.corr()
col = corr['SalePrice'].sort_values(ascending=False).abs()
col
scaler = StandardScaler()
x = train.iloc[:, 1:-1]
y = train['SalePrice']
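# Drop features whose absolute correlation with SalePrice is under 0.05; 'Id' is removed from the drop list because x (columns 1:-1) no longer contains it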
uncorrelated = [i for i in col.keys() if col[i] < 0.05]
uncorrelated.remove('Id')
x_new = x.drop(columns=uncorrelated)
test = test.drop(columns=uncorrelated)
x_new['LotFrontage'].hist(bins=50)
plt.show() | code |
89135215/cell_25 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
house_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
house_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
Numlist1 = ['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageQual', 'GarageCond']
Numlist2 = ['BsmtExposure']
Numlist3 = ['BsmtFinType1', 'BsmtFinType2']
Numlist4 = ['PoolQC']
Numlist5 = ['Fence']
Numlist6 = ['ExterQual', 'ExterCond', 'HeatingQC', 'KitchenQual']
Numlist7 = ['LotShape']
Numlist8 = ['LandSlope']
Numlist9 = ['Functional']
Numlist10 = ['GarageFinish']
def numeric_map1(x):
return x.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5, np.nan: 0})
def numeric_map2(y):
return y.map({'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4, np.nan: 0})
def numeric_map3(z):
return z.map({'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6, np.nan: 0})
def numeric_map4(a):
return a.map({'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4, np.nan: 0})
def numeric_map5(b):
return b.map({'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4, np.nan: 0})
def numeric_map6(c):
return c.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5})
def numeric_map7(d):
return d.map({'IR3': 1, 'IR2': 2, 'IR1': 3, 'Reg': 4})
def numeric_map8(e):
return e.map({'Sev': 1, 'Mod': 2, 'Gtl': 3})
def numeric_map9(f):
return f.map({'Sal': 1, 'Sev': 2, 'Maj2': 3, 'Maj1': 4, 'Mod': 5, 'Min2': 6, 'Min1': 7, 'Typ': 8})
def numeric_map10(g):
return g.map({'Unf': 1, 'RFn': 2, 'Fin': 3, np.nan: 0})
house_train[Numlist1] = house_train[Numlist1].apply(numeric_map1)
house_train[Numlist2] = house_train[Numlist2].apply(numeric_map2)
house_train[Numlist3] = house_train[Numlist3].apply(numeric_map3)
house_train[Numlist4] = house_train[Numlist4].apply(numeric_map4)
house_train[Numlist5] = house_train[Numlist5].apply(numeric_map5)
house_train[Numlist6] = house_train[Numlist6].apply(numeric_map6)
house_train[Numlist7] = house_train[Numlist7].apply(numeric_map7)
house_train[Numlist8] = house_train[Numlist8].apply(numeric_map8)
house_train[Numlist9] = house_train[Numlist9].apply(numeric_map9)
house_train[Numlist10] = house_train[Numlist10].apply(numeric_map10)
house_test[Numlist1] = house_test[Numlist1].apply(numeric_map1)
house_test[Numlist2] = house_test[Numlist2].apply(numeric_map2)
house_test[Numlist3] = house_test[Numlist3].apply(numeric_map3)
house_test[Numlist4] = house_test[Numlist4].apply(numeric_map4)
house_test[Numlist5] = house_test[Numlist5].apply(numeric_map5)
house_test[Numlist6] = house_test[Numlist6].apply(numeric_map6)
house_test[Numlist7] = house_test[Numlist7].apply(numeric_map7)
house_test[Numlist8] = house_test[Numlist8].apply(numeric_map8)
house_test[Numlist9] = house_test[Numlist9].apply(numeric_map9)
house_test[Numlist10] = house_test[Numlist10].apply(numeric_map10)
train = house_train.select_dtypes(exclude=['object'])
test = house_test.select_dtypes(exclude=['object'])
corr = train.corr()
col = corr['SalePrice'].sort_values(ascending=False).abs()
col
scaler = StandardScaler()
x = train.iloc[:, 1:-1]
y = train['SalePrice']
uncorrelated = [i for i in col.keys() if col[i] < 0.05]
uncorrelated.remove('Id')
x_new = x.drop(columns=uncorrelated)
test = test.drop(columns=uncorrelated)
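# Pairwise correlation heatmap of the retained features (annot=True prints each coefficient), useful for eyeballing multicollinearity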
corr = x_new.corr()
plt.figure(figsize=(25, 25))
sns.heatmap(corr, annot=True)
plt.show() | code |
89135215/cell_23 | [
"image_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
house_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
house_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
Numlist1 = ['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageQual', 'GarageCond']
Numlist2 = ['BsmtExposure']
Numlist3 = ['BsmtFinType1', 'BsmtFinType2']
Numlist4 = ['PoolQC']
Numlist5 = ['Fence']
Numlist6 = ['ExterQual', 'ExterCond', 'HeatingQC', 'KitchenQual']
Numlist7 = ['LotShape']
Numlist8 = ['LandSlope']
Numlist9 = ['Functional']
Numlist10 = ['GarageFinish']
def numeric_map1(x):
return x.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5, np.nan: 0})
def numeric_map2(y):
return y.map({'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4, np.nan: 0})
def numeric_map3(z):
return z.map({'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6, np.nan: 0})
def numeric_map4(a):
return a.map({'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4, np.nan: 0})
def numeric_map5(b):
return b.map({'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4, np.nan: 0})
def numeric_map6(c):
return c.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5})
def numeric_map7(d):
return d.map({'IR3': 1, 'IR2': 2, 'IR1': 3, 'Reg': 4})
def numeric_map8(e):
return e.map({'Sev': 1, 'Mod': 2, 'Gtl': 3})
def numeric_map9(f):
return f.map({'Sal': 1, 'Sev': 2, 'Maj2': 3, 'Maj1': 4, 'Mod': 5, 'Min2': 6, 'Min1': 7, 'Typ': 8})
def numeric_map10(g):
return g.map({'Unf': 1, 'RFn': 2, 'Fin': 3, np.nan: 0})
house_train[Numlist1] = house_train[Numlist1].apply(numeric_map1)
house_train[Numlist2] = house_train[Numlist2].apply(numeric_map2)
house_train[Numlist3] = house_train[Numlist3].apply(numeric_map3)
house_train[Numlist4] = house_train[Numlist4].apply(numeric_map4)
house_train[Numlist5] = house_train[Numlist5].apply(numeric_map5)
house_train[Numlist6] = house_train[Numlist6].apply(numeric_map6)
house_train[Numlist7] = house_train[Numlist7].apply(numeric_map7)
house_train[Numlist8] = house_train[Numlist8].apply(numeric_map8)
house_train[Numlist9] = house_train[Numlist9].apply(numeric_map9)
house_train[Numlist10] = house_train[Numlist10].apply(numeric_map10)
house_test[Numlist1] = house_test[Numlist1].apply(numeric_map1)
house_test[Numlist2] = house_test[Numlist2].apply(numeric_map2)
house_test[Numlist3] = house_test[Numlist3].apply(numeric_map3)
house_test[Numlist4] = house_test[Numlist4].apply(numeric_map4)
house_test[Numlist5] = house_test[Numlist5].apply(numeric_map5)
house_test[Numlist6] = house_test[Numlist6].apply(numeric_map6)
house_test[Numlist7] = house_test[Numlist7].apply(numeric_map7)
house_test[Numlist8] = house_test[Numlist8].apply(numeric_map8)
house_test[Numlist9] = house_test[Numlist9].apply(numeric_map9)
house_test[Numlist10] = house_test[Numlist10].apply(numeric_map10)
train = house_train.select_dtypes(exclude=['object'])
test = house_test.select_dtypes(exclude=['object'])
corr = train.corr()
col = corr['SalePrice'].sort_values(ascending=False).abs()
col
scaler = StandardScaler()
x = train.iloc[:, 1:-1]
y = train['SalePrice']
uncorrelated = [i for i in col.keys() if col[i] < 0.05]
uncorrelated.remove('Id')
x_new = x.drop(columns=uncorrelated)
test = test.drop(columns=uncorrelated)
sns.distplot(x_new['BsmtUnfSF']) | code |
89135215/cell_20 | [
"image_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
house_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
house_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
Numlist1 = ['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageQual', 'GarageCond']
Numlist2 = ['BsmtExposure']
Numlist3 = ['BsmtFinType1', 'BsmtFinType2']
Numlist4 = ['PoolQC']
Numlist5 = ['Fence']
Numlist6 = ['ExterQual', 'ExterCond', 'HeatingQC', 'KitchenQual']
Numlist7 = ['LotShape']
Numlist8 = ['LandSlope']
Numlist9 = ['Functional']
Numlist10 = ['GarageFinish']
def numeric_map1(x):
return x.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5, np.nan: 0})
def numeric_map2(y):
return y.map({'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4, np.nan: 0})
def numeric_map3(z):
return z.map({'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6, np.nan: 0})
def numeric_map4(a):
return a.map({'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4, np.nan: 0})
def numeric_map5(b):
return b.map({'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4, np.nan: 0})
def numeric_map6(c):
return c.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5})
def numeric_map7(d):
return d.map({'IR3': 1, 'IR2': 2, 'IR1': 3, 'Reg': 4})
def numeric_map8(e):
return e.map({'Sev': 1, 'Mod': 2, 'Gtl': 3})
def numeric_map9(f):
return f.map({'Sal': 1, 'Sev': 2, 'Maj2': 3, 'Maj1': 4, 'Mod': 5, 'Min2': 6, 'Min1': 7, 'Typ': 8})
def numeric_map10(g):
return g.map({'Unf': 1, 'RFn': 2, 'Fin': 3, np.nan: 0})
house_train[Numlist1] = house_train[Numlist1].apply(numeric_map1)
house_train[Numlist2] = house_train[Numlist2].apply(numeric_map2)
house_train[Numlist3] = house_train[Numlist3].apply(numeric_map3)
house_train[Numlist4] = house_train[Numlist4].apply(numeric_map4)
house_train[Numlist5] = house_train[Numlist5].apply(numeric_map5)
house_train[Numlist6] = house_train[Numlist6].apply(numeric_map6)
house_train[Numlist7] = house_train[Numlist7].apply(numeric_map7)
house_train[Numlist8] = house_train[Numlist8].apply(numeric_map8)
house_train[Numlist9] = house_train[Numlist9].apply(numeric_map9)
house_train[Numlist10] = house_train[Numlist10].apply(numeric_map10)
house_test[Numlist1] = house_test[Numlist1].apply(numeric_map1)
house_test[Numlist2] = house_test[Numlist2].apply(numeric_map2)
house_test[Numlist3] = house_test[Numlist3].apply(numeric_map3)
house_test[Numlist4] = house_test[Numlist4].apply(numeric_map4)
house_test[Numlist5] = house_test[Numlist5].apply(numeric_map5)
house_test[Numlist6] = house_test[Numlist6].apply(numeric_map6)
house_test[Numlist7] = house_test[Numlist7].apply(numeric_map7)
house_test[Numlist8] = house_test[Numlist8].apply(numeric_map8)
house_test[Numlist9] = house_test[Numlist9].apply(numeric_map9)
house_test[Numlist10] = house_test[Numlist10].apply(numeric_map10)
train = house_train.select_dtypes(exclude=['object'])
test = house_test.select_dtypes(exclude=['object'])
corr = train.corr()
col = corr['SalePrice'].sort_values(ascending=False).abs()
col
scaler = StandardScaler()
x = train.iloc[:, 1:-1]
y = train['SalePrice']
uncorrelated = [i for i in col.keys() if col[i] < 0.05]
uncorrelated.remove('Id')
x_new = x.drop(columns=uncorrelated)
test = test.drop(columns=uncorrelated)
sns.distplot(x_new['BsmtUnfSF']) | code |
89135215/cell_2 | [
"image_output_1.png"
] | import os
import numpy as np
import pandas as pd
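# Kaggle's starter snippet: list every file available under /kaggle/input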
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
89135215/cell_11 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
house_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
house_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
Numlist1 = ['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageQual', 'GarageCond']
Numlist2 = ['BsmtExposure']
Numlist3 = ['BsmtFinType1', 'BsmtFinType2']
Numlist4 = ['PoolQC']
Numlist5 = ['Fence']
Numlist6 = ['ExterQual', 'ExterCond', 'HeatingQC', 'KitchenQual']
Numlist7 = ['LotShape']
Numlist8 = ['LandSlope']
Numlist9 = ['Functional']
Numlist10 = ['GarageFinish']
def numeric_map1(x):
return x.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5, np.nan: 0})
def numeric_map2(y):
return y.map({'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4, np.nan: 0})
def numeric_map3(z):
return z.map({'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6, np.nan: 0})
def numeric_map4(a):
return a.map({'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex': 4, np.nan: 0})
def numeric_map5(b):
return b.map({'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4, np.nan: 0})
def numeric_map6(c):
return c.map({'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5})
def numeric_map7(d):
return d.map({'IR3': 1, 'IR2': 2, 'IR1': 3, 'Reg': 4})
def numeric_map8(e):
return e.map({'Sev': 1, 'Mod': 2, 'Gtl': 3})
def numeric_map9(f):
return f.map({'Sal': 1, 'Sev': 2, 'Maj2': 3, 'Maj1': 4, 'Mod': 5, 'Min2': 6, 'Min1': 7, 'Typ': 8})
def numeric_map10(g):
return g.map({'Unf': 1, 'RFn': 2, 'Fin': 3, np.nan: 0})
house_train[Numlist1] = house_train[Numlist1].apply(numeric_map1)
house_train[Numlist2] = house_train[Numlist2].apply(numeric_map2)
house_train[Numlist3] = house_train[Numlist3].apply(numeric_map3)
house_train[Numlist4] = house_train[Numlist4].apply(numeric_map4)
house_train[Numlist5] = house_train[Numlist5].apply(numeric_map5)
house_train[Numlist6] = house_train[Numlist6].apply(numeric_map6)
house_train[Numlist7] = house_train[Numlist7].apply(numeric_map7)
house_train[Numlist8] = house_train[Numlist8].apply(numeric_map8)
house_train[Numlist9] = house_train[Numlist9].apply(numeric_map9)
house_train[Numlist10] = house_train[Numlist10].apply(numeric_map10)
house_test[Numlist1] = house_test[Numlist1].apply(numeric_map1)
house_test[Numlist2] = house_test[Numlist2].apply(numeric_map2)
house_test[Numlist3] = house_test[Numlist3].apply(numeric_map3)
house_test[Numlist4] = house_test[Numlist4].apply(numeric_map4)
house_test[Numlist5] = house_test[Numlist5].apply(numeric_map5)
house_test[Numlist6] = house_test[Numlist6].apply(numeric_map6)
house_test[Numlist7] = house_test[Numlist7].apply(numeric_map7)
house_test[Numlist8] = house_test[Numlist8].apply(numeric_map8)
house_test[Numlist9] = house_test[Numlist9].apply(numeric_map9)
house_test[Numlist10] = house_test[Numlist10].apply(numeric_map10)
train = house_train.select_dtypes(exclude=['object'])
test = house_test.select_dtypes(exclude=['object'])
corr = train.corr()
col = corr['SalePrice'].sort_values(ascending=False).abs()
col
scaler = StandardScaler()
x = train.iloc[:, 1:-1]
y = train['SalePrice']
uncorrelated = [i for i in col.keys() if col[i] < 0.05]
uncorrelated.remove('Id')
x_new = x.drop(columns=uncorrelated)
test = test.drop(columns=uncorrelated)
x_new.hist(bins=50, figsize=(50, 50))
plt.show() | code |
89135215/cell_7 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
house_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
house_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train = house_train.select_dtypes(exclude=['object'])
test = house_test.select_dtypes(exclude=['object'])
train.info() | code |
89135215/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
house_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
house_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train = house_train.select_dtypes(exclude=['object'])
test = house_test.select_dtypes(exclude=['object'])
corr = train.corr()
col = corr['SalePrice'].sort_values(ascending=False).abs()
col | code |