Columns:
path: string (length 13 to 17)
screenshot_names: sequence (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 distinct value: "code")
130003964/cell_18
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt  # needed for the plt calls below
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
df.columns
years_exp = df.YearsExperience.values
years_exp
salary = df.Salary.values
salary
x = years_exp
y = salary
plt.plot  # bare attribute reference; draws nothing
x = x.reshape(-1, 1)
x
plt.plot  # bare attribute reference; draws nothing
lr = LinearRegression()
lr.fit(x_train, y_train)  # x_train/y_train come from a train/test split in an earlier cell
y_predict = lr.predict([[1.2], [3.3]])
y_predict
lr.score(x_test, y_test) * 100
y_predict = lr.predict(x_test)
y_predict
plt.scatter(x, y, color='blue')
plt.scatter(x_test, y_predict, color='red')
plt.xlabel('YearsExperience')
plt.ylabel('Salary')
plt.plot  # bare attribute reference; draws nothing
code
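Note: the cell above uses x_train, x_test, y_train, and y_test without defining them; in the source notebook they presumably come from an earlier cell. A minimal sketch of the assumed split with scikit-learn's train_test_split (the variable names mirror the cell; test_size and random_state are illustrative guesses, not from the source):

from sklearn.model_selection import train_test_split

# Assumed earlier notebook cell: split the reshaped x and y into train/test parts.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)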
130003964/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt  # needed for the plt calls below
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
df.columns
years_exp = df.YearsExperience.values
years_exp
salary = df.Salary.values
salary
x = years_exp
y = salary
plt.scatter(x, y, color='blue')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.plot  # bare attribute reference; draws nothing
code
130003964/cell_15
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression

lr = LinearRegression()
lr.fit(x_train, y_train)  # x_train/y_train come from a train/test split in an earlier cell
y_predict = lr.predict([[1.2], [3.3]])
y_predict
code
130003964/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression

lr = LinearRegression()
lr.fit(x_train, y_train)  # x_train/y_train come from a train/test split in an earlier cell
y_predict = lr.predict([[1.2], [3.3]])
y_predict
lr.score(x_test, y_test) * 100
code
130003964/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
df.columns
code
130003964/cell_17
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression

lr = LinearRegression()
lr.fit(x_train, y_train)  # x_train/y_train come from a train/test split in an earlier cell
y_predict = lr.predict([[1.2], [3.3]])
y_predict
lr.score(x_test, y_test) * 100
y_predict = lr.predict(x_test)
y_predict
code
130003964/cell_14
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression

lr = LinearRegression()
lr.fit(x_train, y_train)  # x_train/y_train come from a train/test split in an earlier cell
code
130003964/cell_12
[ "text_plain_output_1.png" ]
(x_test, len(x_test))
code
130003964/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
df.columns
years_exp = df.YearsExperience.values
years_exp
code
128048739/cell_21
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
cv = CountVectorizer()
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(2, 2))
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(1, 2))
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
code
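For a concrete sense of what ngram_range does in the cells above: (2, 2) keeps only bigrams, while (1, 2) keeps unigrams and bigrams together. A small self-contained check on the first sentence:

from sklearn.feature_extraction.text import CountVectorizer

cv = CountVectorizer(ngram_range=(1, 2))
cv.fit(['This is the first document.'])
print(cv.get_feature_names_out())
# ['document' 'first' 'first document' 'is' 'is the' 'the' 'the first'
#  'this' 'this is']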
128048739/cell_9
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer

sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
cv = CountVectorizer()
X = cv.fit_transform(sentences)
X.toarray()
code
128048739/cell_33
[ "text_html_output_1.png" ]
from transformers import AutoTokenizer, BertModel
from transformers import AutoTokenizer, BertModel

tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
code
128048739/cell_20
[ "text_html_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
cv = CountVectorizer()
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(2, 2))
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(1, 2))
X = cv.fit_transform(sentences)
X.toarray()
code
128048739/cell_29
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
cv = CountVectorizer()
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(2, 2))
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(1, 2))
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
tfidf = TfidfVectorizer()
X = tfidf.fit_transform(sentences)
X.toarray()
tfidf.get_feature_names_out()
df = pd.DataFrame(X[0].T.todense(), index=tfidf.get_feature_names_out(), columns=['TF-IDF'])
df.sort_values('TF-IDF', ascending=False)
code
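The cell above inspects one document at a time via X[0].T.todense(); the full TF-IDF matrix can also be viewed at once. A minimal sketch, assuming the tfidf and X objects from that cell:

import pandas as pd

# One row per sentence, one column per term; values are the (l2-normalized) TF-IDF weights.
tfidf_df = pd.DataFrame(X.toarray(), columns=tfidf.get_feature_names_out())
print(tfidf_df.round(2))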
128048739/cell_26
[ "text_html_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
cv = CountVectorizer()
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(2, 2))
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(1, 2))
X = cv.fit_transform(sentences)
X.toarray()
sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
tfidf = TfidfVectorizer()
X = tfidf.fit_transform(sentences)
X.toarray()
code
128048739/cell_11
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer

sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
cv = CountVectorizer()
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
code
128048739/cell_28
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
cv = CountVectorizer()
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(2, 2))
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(1, 2))
X = cv.fit_transform(sentences)
X.toarray()
sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
tfidf = TfidfVectorizer()
X = tfidf.fit_transform(sentences)
X.toarray()
X[0].T.todense()
code
128048739/cell_15
[ "text_html_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
cv = CountVectorizer()
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(2, 2))
X = cv.fit_transform(sentences)
X.toarray()
code
128048739/cell_16
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
cv = CountVectorizer()
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(2, 2))
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
code
128048739/cell_17
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
cv = CountVectorizer()
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(2, 2))
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
code
128048739/cell_35
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_6.png", "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from transformers import AutoTokenizer, BertModel
import torch
from transformers import AutoTokenizer, BertModel

tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
example_word = 'cyber'
example_token_id = tokenizer.convert_tokens_to_ids([example_word])[0]
example_embedding = model.embeddings.word_embeddings(torch.tensor([example_token_id]))
print(example_embedding.shape)
print(example_embedding)
code
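For reference, bert-base-uncased has a hidden size of 768, so the shape printed by the cell above should be torch.Size([1, 768]): one row for the single token id, 768 embedding dimensions. A quick sanity check, assuming the tokenizer and model from that cell:

import torch

# 'cyber' maps to a single token id (the [UNK] id if it were out of vocabulary),
# so the embedding lookup returns one 768-dimensional row.
token_id = tokenizer.convert_tokens_to_ids(['cyber'])[0]
embedding = model.embeddings.word_embeddings(torch.tensor([token_id]))
assert embedding.shape == (1, 768)  # hidden size of bert-base-uncased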
128048739/cell_22
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
cv = CountVectorizer()
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(2, 2))
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(1, 2))
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
code
128048739/cell_27
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
cv = CountVectorizer()
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(2, 2))
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
cv = CountVectorizer(ngram_range=(1, 2))
X = cv.fit_transform(sentences)
X.toarray()
sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
tfidf = TfidfVectorizer()
X = tfidf.fit_transform(sentences)
X.toarray()
tfidf.get_feature_names_out()
code
128048739/cell_12
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sentences = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
cv = CountVectorizer()
X = cv.fit_transform(sentences)
X.toarray()
cv.get_feature_names_out()
pd.DataFrame(cv.get_feature_names_out())
code
32067919/cell_4
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train_ = train[train['ConfirmedCases'] >= 0]
train_.head()
code
32067919/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train_ = train[train['ConfirmedCases'] >= 0]
EMPTY_VAL = 'EMPTY_VAL'

def fillState(state, country):
    if state == EMPTY_VAL:
        return country
    return state

train_['Province_State'].fillna(EMPTY_VAL, inplace=True)
train_['Province_State'] = train_.loc[:, ['Province_State', 'Country_Region']].apply(lambda x: fillState(x['Province_State'], x['Country_Region']), axis=1)
test['Province_State'].fillna(EMPTY_VAL, inplace=True)
test['Province_State'] = test.loc[:, ['Province_State', 'Country_Region']].apply(lambda x: fillState(x['Province_State'], x['Country_Region']), axis=1)
test.head()
code
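The fillState pattern in the cell above (sentinel-fill, then a row-wise apply) can be expressed more directly in pandas, since Series.fillna accepts another Series aligned on the index. An equivalent sketch for comparison:

# Equivalent to the fillState round-trip: use the country name wherever the state is missing.
train_['Province_State'] = train_['Province_State'].fillna(train_['Country_Region'])
test['Province_State'] = test['Province_State'].fillna(test['Country_Region'])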
32067919/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.optimize as opt

train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train_ = train[train['ConfirmedCases'] >= 0]
EMPTY_VAL = 'EMPTY_VAL'

def fillState(state, country):
    if state == EMPTY_VAL:
        return country
    return state

train_['Province_State'].fillna(EMPTY_VAL, inplace=True)
train_['Province_State'] = train_.loc[:, ['Province_State', 'Country_Region']].apply(lambda x: fillState(x['Province_State'], x['Country_Region']), axis=1)
test['Province_State'].fillna(EMPTY_VAL, inplace=True)
test['Province_State'] = test.loc[:, ['Province_State', 'Country_Region']].apply(lambda x: fillState(x['Province_State'], x['Country_Region']), axis=1)
train_['row_number'] = train_.groupby(['Country_Region', 'Province_State']).cumcount()
x = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['row_number']
y = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['ConfirmedCases']
y_ = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['Fatalities']

def f(x, L, b, k, x_0):
    return L / (1.0 + np.exp(-k * (x - x_0))) + b

def logistic(xs, L, k, x_0):
    result = []
    for x in xs:
        xp = k * (x - x_0)
        if xp >= 0:
            result.append(L / (1.0 + np.exp(-xp)))
        else:
            result.append(L * np.exp(xp) / (1.0 + np.exp(xp)))
    return result

p0 = [max(y), 0.0, max(x)]
p0_ = [max(y_), 0.0, max(x)]
x_ = np.arange(0, 100, 1).tolist()
try:
    popt, pcov = opt.curve_fit(logistic, x, y, p0)
    yfit = logistic(x_, *popt)
    popt_, pcov_ = opt.curve_fit(logistic, x, y_, p0_)
    yfit_ = logistic(x_, *popt_)
except:
    popt, pcov = opt.curve_fit(f, x, y, method='lm', maxfev=5000)
    yfit = f(x_, *popt)
    popt_, pcov_ = opt.curve_fit(f, x, y_, method='lm', maxfev=5000)
    yfit_ = f(x_, *popt_)

fig, ax = plt.subplots(1, 1, figsize=(10, 8))
ax.plot(x, y, 'o', label='Actual Cases')
ax.plot(x_, yfit, '-', label='Fitted Cases')
ax.plot(x, y_, 'o', label='Actual Fatalities')
ax.plot(x_, yfit_, '-', label='Fitted fatalities')
ax.title.set_text('US - New York')
plt.legend(loc='center right')
plt.show()
code
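The piecewise logistic function in the cell above is the numerically stable form of the sigmoid L / (1 + exp(-k(x - x_0))): for negative arguments it is rewritten as L * exp(xp) / (1 + exp(xp)), so np.exp is only ever evaluated at a non-positive value and cannot overflow. A quick illustration of the failure mode it avoids:

import numpy as np

xp = -1000.0
# Naive form: np.exp(-xp) = np.exp(1000.0) overflows to inf with a RuntimeWarning.
# Stable form: np.exp(xp) = np.exp(-1000.0) underflows cleanly to 0.0.
print(np.exp(xp) / (1.0 + np.exp(xp)))  # 0.0, no warning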
32067919/cell_16
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.optimize as opt

train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train_ = train[train['ConfirmedCases'] >= 0]
EMPTY_VAL = 'EMPTY_VAL'

def fillState(state, country):
    if state == EMPTY_VAL:
        return country
    return state

train_['Province_State'].fillna(EMPTY_VAL, inplace=True)
train_['Province_State'] = train_.loc[:, ['Province_State', 'Country_Region']].apply(lambda x: fillState(x['Province_State'], x['Country_Region']), axis=1)
test['Province_State'].fillna(EMPTY_VAL, inplace=True)
test['Province_State'] = test.loc[:, ['Province_State', 'Country_Region']].apply(lambda x: fillState(x['Province_State'], x['Country_Region']), axis=1)
train_['row_number'] = train_.groupby(['Country_Region', 'Province_State']).cumcount()
x = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['row_number']
y = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['ConfirmedCases']
y_ = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['Fatalities']

def f(x, L, b, k, x_0):
    return L / (1.0 + np.exp(-k * (x - x_0))) + b

def logistic(xs, L, k, x_0):
    result = []
    for x in xs:
        xp = k * (x - x_0)
        if xp >= 0:
            result.append(L / (1.0 + np.exp(-xp)))
        else:
            result.append(L * np.exp(xp) / (1.0 + np.exp(xp)))
    return result

p0 = [max(y), 0.0, max(x)]
p0_ = [max(y_), 0.0, max(x)]
x_ = np.arange(0, 100, 1).tolist()
try:
    popt, pcov = opt.curve_fit(logistic, x, y, p0)
    yfit = logistic(x_, *popt)
    popt_, pcov_ = opt.curve_fit(logistic, x, y_, p0_)
    yfit_ = logistic(x_, *popt_)
except:
    popt, pcov = opt.curve_fit(f, x, y, method='lm', maxfev=5000)
    yfit = f(x_, *popt)
    popt_, pcov_ = opt.curve_fit(f, x, y_, method='lm', maxfev=5000)
    yfit_ = f(x_, *popt_)
    #print("problem")

fig, ax = plt.subplots(1, 1, figsize=(10, 8))
ax.plot(x, y, 'o', label='Actual Cases')
ax.plot(x_, yfit, '-', label='Fitted Cases')
ax.plot(x, y_, 'o', label='Actual Fatalities')
ax.plot(x_, yfit_, '-', label='Fitted fatalities')
ax.title.set_text('US - New York')
plt.legend(loc='center right')
plt.show()

unique = pd.DataFrame(train_.groupby(['Country_Region', 'Province_State'], as_index=False).count())

import datetime as dt

def date_day_diff(d1, d2):
    delta = dt.datetime.strptime(d1, '%Y-%m-%d') - dt.datetime.strptime(d2, '%Y-%m-%d')
    return delta.days

log_regions = []
for index, region in unique.iterrows():
    st = region['Province_State']
    co = region['Country_Region']
    rdata = train_[(train_['Province_State'] == st) & (train_['Country_Region'] == co)]
    t = rdata['Date'].values
    t = [float(date_day_diff(d, t[0])) for d in t]
    y = rdata['ConfirmedCases'].values
    y_ = rdata['Fatalities'].values
    p0 = [max(y), 0.0, max(t)]
    p0_ = [max(y_), 0.0, max(t)]
    try:
        popt, pcov = opt.curve_fit(logistic, t, y, p0, maxfev=10000)
        try:
            popt_, pcov_ = opt.curve_fit(logistic, t, y_, p0_, maxfev=10000)
        except:
            popt_, pcov_ = opt.curve_fit(f, t, y_, method='trf', maxfev=10000)
        log_regions.append((co, st, popt, popt_))
    except:
        popt, pcov = opt.curve_fit(f, t, y, method='trf', maxfev=10000)
        popt_, pcov_ = opt.curve_fit(f, t, y_, method='trf', maxfev=10000)
        log_regions.append((co, st, popt, popt_))

log_regions = pd.DataFrame(log_regions)
log_regions.columns = ['Country_Region', 'Province_State', 'ConfirmedCases', 'Fatalities']
T = np.arange(0, 100, 1).tolist()
popt = list(log_regions[log_regions['Country_Region'] == 'Italy'][log_regions['Province_State'] == 'Italy']['ConfirmedCases'])[0]
popt_ = list(log_regions[log_regions['Country_Region'] == 'Italy'][log_regions['Province_State'] == 'Italy']['Fatalities'])[0]
try:
    yfit = logistic(T, *popt)
    yfit_ = logistic(T, *popt_)
except:
    yfit = f(T, *popt)
    yfit_ = f(T, *popt_)

fig, ax = plt.subplots(1, 1, figsize=(10, 8))
ax.plot(T, yfit, label='Fitted ConfirmedCases')
ax.plot(T, yfit_, label='Fitted Fatalities')
ax.title.set_text('Italy fitted params')
plt.legend(loc='upper left')
plt.show()
code
32067919/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train.head()
code
32067919/cell_17
[ "text_html_output_1.png" ]
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.optimize as opt

train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train_ = train[train['ConfirmedCases'] >= 0]
EMPTY_VAL = 'EMPTY_VAL'

def fillState(state, country):
    if state == EMPTY_VAL:
        return country
    return state

train_['Province_State'].fillna(EMPTY_VAL, inplace=True)
train_['Province_State'] = train_.loc[:, ['Province_State', 'Country_Region']].apply(lambda x: fillState(x['Province_State'], x['Country_Region']), axis=1)
test['Province_State'].fillna(EMPTY_VAL, inplace=True)
test['Province_State'] = test.loc[:, ['Province_State', 'Country_Region']].apply(lambda x: fillState(x['Province_State'], x['Country_Region']), axis=1)
train_['row_number'] = train_.groupby(['Country_Region', 'Province_State']).cumcount()
x = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['row_number']
y = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['ConfirmedCases']
y_ = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['Fatalities']

def f(x, L, b, k, x_0):
    return L / (1.0 + np.exp(-k * (x - x_0))) + b

def logistic(xs, L, k, x_0):
    result = []
    for x in xs:
        xp = k * (x - x_0)
        if xp >= 0:
            result.append(L / (1.0 + np.exp(-xp)))
        else:
            result.append(L * np.exp(xp) / (1.0 + np.exp(xp)))
    return result

p0 = [max(y), 0.0, max(x)]
p0_ = [max(y_), 0.0, max(x)]
x_ = np.arange(0, 100, 1).tolist()
try:
    popt, pcov = opt.curve_fit(logistic, x, y, p0)
    yfit = logistic(x_, *popt)
    popt_, pcov_ = opt.curve_fit(logistic, x, y_, p0_)
    yfit_ = logistic(x_, *popt_)
except:
    popt, pcov = opt.curve_fit(f, x, y, method='lm', maxfev=5000)
    yfit = f(x_, *popt)
    popt_, pcov_ = opt.curve_fit(f, x, y_, method='lm', maxfev=5000)
    yfit_ = f(x_, *popt_)
    #print("problem")

fig, ax = plt.subplots(1, 1, figsize=(10, 8))
ax.plot(x, y, 'o', label='Actual Cases')
ax.plot(x_, yfit, '-', label='Fitted Cases')
ax.plot(x, y_, 'o', label='Actual Fatalities')
ax.plot(x_, yfit_, '-', label='Fitted fatalities')
ax.title.set_text('US - New York')
plt.legend(loc='center right')
plt.show()

unique = pd.DataFrame(train_.groupby(['Country_Region', 'Province_State'], as_index=False).count())

import datetime as dt

def date_day_diff(d1, d2):
    delta = dt.datetime.strptime(d1, '%Y-%m-%d') - dt.datetime.strptime(d2, '%Y-%m-%d')
    return delta.days

log_regions = []
for index, region in unique.iterrows():
    st = region['Province_State']
    co = region['Country_Region']
    rdata = train_[(train_['Province_State'] == st) & (train_['Country_Region'] == co)]
    t = rdata['Date'].values
    t = [float(date_day_diff(d, t[0])) for d in t]
    y = rdata['ConfirmedCases'].values
    y_ = rdata['Fatalities'].values
    p0 = [max(y), 0.0, max(t)]
    p0_ = [max(y_), 0.0, max(t)]
    try:
        popt, pcov = opt.curve_fit(logistic, t, y, p0, maxfev=10000)
        try:
            popt_, pcov_ = opt.curve_fit(logistic, t, y_, p0_, maxfev=10000)
        except:
            popt_, pcov_ = opt.curve_fit(f, t, y_, method='trf', maxfev=10000)
        log_regions.append((co, st, popt, popt_))
    except:
        popt, pcov = opt.curve_fit(f, t, y, method='trf', maxfev=10000)
        popt_, pcov_ = opt.curve_fit(f, t, y_, method='trf', maxfev=10000)
        log_regions.append((co, st, popt, popt_))

log_regions = pd.DataFrame(log_regions)
log_regions.columns = ['Country_Region', 'Province_State', 'ConfirmedCases', 'Fatalities']
T = np.arange(0, 100, 1).tolist()
popt = list(log_regions[log_regions['Country_Region'] == 'Italy'][log_regions['Province_State'] == 'Italy']['ConfirmedCases'])[0]
popt_ = list(log_regions[log_regions['Country_Region'] == 'Italy'][log_regions['Province_State'] == 'Italy']['Fatalities'])[0]
try:
    yfit = logistic(T, *popt)
    yfit_ = logistic(T, *popt_)
except:
    yfit = f(T, *popt)
    yfit_ = f(T, *popt_)

fig, ax = plt.subplots(1, 1, figsize=(10, 8))
ax.plot(T, yfit, label='Fitted ConfirmedCases')
ax.plot(T, yfit_, label='Fitted Fatalities')
ax.title.set_text('Italy fitted params')
plt.legend(loc='upper left')
plt.show()

for index, rt in log_regions.iterrows():
    st = rt['Province_State']
    co = rt['Country_Region']
    popt = list(rt['ConfirmedCases'])
    popt_ = list(rt['Fatalities'])
    print(co, st, popt, popt_)
code
32067919/cell_14
[ "text_html_output_1.png" ]
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.optimize as opt

train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train_ = train[train['ConfirmedCases'] >= 0]
EMPTY_VAL = 'EMPTY_VAL'

def fillState(state, country):
    if state == EMPTY_VAL:
        return country
    return state

train_['Province_State'].fillna(EMPTY_VAL, inplace=True)
train_['Province_State'] = train_.loc[:, ['Province_State', 'Country_Region']].apply(lambda x: fillState(x['Province_State'], x['Country_Region']), axis=1)
test['Province_State'].fillna(EMPTY_VAL, inplace=True)
test['Province_State'] = test.loc[:, ['Province_State', 'Country_Region']].apply(lambda x: fillState(x['Province_State'], x['Country_Region']), axis=1)
train_['row_number'] = train_.groupby(['Country_Region', 'Province_State']).cumcount()
x = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['row_number']
y = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['ConfirmedCases']
y_ = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['Fatalities']

def f(x, L, b, k, x_0):
    return L / (1.0 + np.exp(-k * (x - x_0))) + b

def logistic(xs, L, k, x_0):
    result = []
    for x in xs:
        xp = k * (x - x_0)
        if xp >= 0:
            result.append(L / (1.0 + np.exp(-xp)))
        else:
            result.append(L * np.exp(xp) / (1.0 + np.exp(xp)))
    return result

p0 = [max(y), 0.0, max(x)]
p0_ = [max(y_), 0.0, max(x)]
x_ = np.arange(0, 100, 1).tolist()
try:
    popt, pcov = opt.curve_fit(logistic, x, y, p0)
    yfit = logistic(x_, *popt)
    popt_, pcov_ = opt.curve_fit(logistic, x, y_, p0_)
    yfit_ = logistic(x_, *popt_)
except:
    popt, pcov = opt.curve_fit(f, x, y, method='lm', maxfev=5000)
    yfit = f(x_, *popt)
    popt_, pcov_ = opt.curve_fit(f, x, y_, method='lm', maxfev=5000)
    yfit_ = f(x_, *popt_)
    #print("problem")

fig, ax = plt.subplots(1, 1, figsize=(10, 8))
ax.plot(x, y, 'o', label='Actual Cases')
ax.plot(x_, yfit, '-', label='Fitted Cases')
ax.plot(x, y_, 'o', label='Actual Fatalities')
ax.plot(x_, yfit_, '-', label='Fitted fatalities')
ax.title.set_text('US - New York')
plt.legend(loc='center right')
plt.show()

unique = pd.DataFrame(train_.groupby(['Country_Region', 'Province_State'], as_index=False).count())

import datetime as dt

def date_day_diff(d1, d2):
    delta = dt.datetime.strptime(d1, '%Y-%m-%d') - dt.datetime.strptime(d2, '%Y-%m-%d')
    return delta.days

log_regions = []
for index, region in unique.iterrows():
    st = region['Province_State']
    co = region['Country_Region']
    rdata = train_[(train_['Province_State'] == st) & (train_['Country_Region'] == co)]
    t = rdata['Date'].values
    t = [float(date_day_diff(d, t[0])) for d in t]
    y = rdata['ConfirmedCases'].values
    y_ = rdata['Fatalities'].values
    p0 = [max(y), 0.0, max(t)]
    p0_ = [max(y_), 0.0, max(t)]
    try:
        popt, pcov = opt.curve_fit(logistic, t, y, p0, maxfev=10000)
        try:
            popt_, pcov_ = opt.curve_fit(logistic, t, y_, p0_, maxfev=10000)
        except:
            popt_, pcov_ = opt.curve_fit(f, t, y_, method='trf', maxfev=10000)
        log_regions.append((co, st, popt, popt_))
    except:
        popt, pcov = opt.curve_fit(f, t, y, method='trf', maxfev=10000)
        popt_, pcov_ = opt.curve_fit(f, t, y_, method='trf', maxfev=10000)
        log_regions.append((co, st, popt, popt_))

log_regions = pd.DataFrame(log_regions)
log_regions.columns = ['Country_Region', 'Province_State', 'ConfirmedCases', 'Fatalities']
log_regions.head(1)
code
32067919/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.optimize as opt

train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train_ = train[train['ConfirmedCases'] >= 0]
EMPTY_VAL = 'EMPTY_VAL'

def fillState(state, country):
    if state == EMPTY_VAL:
        return country
    return state

train_['Province_State'].fillna(EMPTY_VAL, inplace=True)
train_['Province_State'] = train_.loc[:, ['Province_State', 'Country_Region']].apply(lambda x: fillState(x['Province_State'], x['Country_Region']), axis=1)
test['Province_State'].fillna(EMPTY_VAL, inplace=True)
test['Province_State'] = test.loc[:, ['Province_State', 'Country_Region']].apply(lambda x: fillState(x['Province_State'], x['Country_Region']), axis=1)
train_['row_number'] = train_.groupby(['Country_Region', 'Province_State']).cumcount()
x = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['row_number']
y = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['ConfirmedCases']
y_ = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['Fatalities']

def f(x, L, b, k, x_0):
    return L / (1.0 + np.exp(-k * (x - x_0))) + b

def logistic(xs, L, k, x_0):
    result = []
    for x in xs:
        xp = k * (x - x_0)
        if xp >= 0:
            result.append(L / (1.0 + np.exp(-xp)))
        else:
            result.append(L * np.exp(xp) / (1.0 + np.exp(xp)))
    return result

p0 = [max(y), 0.0, max(x)]
p0_ = [max(y_), 0.0, max(x)]
x_ = np.arange(0, 100, 1).tolist()
try:
    popt, pcov = opt.curve_fit(logistic, x, y, p0)
    yfit = logistic(x_, *popt)
    popt_, pcov_ = opt.curve_fit(logistic, x, y_, p0_)
    yfit_ = logistic(x_, *popt_)
except:
    popt, pcov = opt.curve_fit(f, x, y, method='lm', maxfev=5000)
    yfit = f(x_, *popt)
    popt_, pcov_ = opt.curve_fit(f, x, y_, method='lm', maxfev=5000)
    yfit_ = f(x_, *popt_)
    #print("problem")

fig, ax = plt.subplots(1, 1, figsize=(10, 8))
ax.plot(x, y, 'o', label='Actual Cases')
ax.plot(x_, yfit, '-', label='Fitted Cases')
ax.plot(x, y_, 'o', label='Actual Fatalities')
ax.plot(x_, yfit_, '-', label='Fitted fatalities')
ax.title.set_text('US - New York')
plt.legend(loc='center right')
plt.show()

unique = pd.DataFrame(train_.groupby(['Country_Region', 'Province_State'], as_index=False).count())
unique.head()
code
32067919/cell_12
[ "text_html_output_1.png" ]
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.optimize as opt

train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train_ = train[train['ConfirmedCases'] >= 0]
EMPTY_VAL = 'EMPTY_VAL'

def fillState(state, country):
    if state == EMPTY_VAL:
        return country
    return state

train_['Province_State'].fillna(EMPTY_VAL, inplace=True)
train_['Province_State'] = train_.loc[:, ['Province_State', 'Country_Region']].apply(lambda x: fillState(x['Province_State'], x['Country_Region']), axis=1)
test['Province_State'].fillna(EMPTY_VAL, inplace=True)
test['Province_State'] = test.loc[:, ['Province_State', 'Country_Region']].apply(lambda x: fillState(x['Province_State'], x['Country_Region']), axis=1)
train_['row_number'] = train_.groupby(['Country_Region', 'Province_State']).cumcount()
x = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['row_number']
y = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['ConfirmedCases']
y_ = train_[train_['Country_Region'] == 'US'][train_['Province_State'] == 'New York']['Fatalities']

def f(x, L, b, k, x_0):
    return L / (1.0 + np.exp(-k * (x - x_0))) + b

def logistic(xs, L, k, x_0):
    result = []
    for x in xs:
        xp = k * (x - x_0)
        if xp >= 0:
            result.append(L / (1.0 + np.exp(-xp)))
        else:
            result.append(L * np.exp(xp) / (1.0 + np.exp(xp)))
    return result

p0 = [max(y), 0.0, max(x)]
p0_ = [max(y_), 0.0, max(x)]
x_ = np.arange(0, 100, 1).tolist()
try:
    popt, pcov = opt.curve_fit(logistic, x, y, p0)
    yfit = logistic(x_, *popt)
    popt_, pcov_ = opt.curve_fit(logistic, x, y_, p0_)
    yfit_ = logistic(x_, *popt_)
except:
    popt, pcov = opt.curve_fit(f, x, y, method='lm', maxfev=5000)
    yfit = f(x_, *popt)
    popt_, pcov_ = opt.curve_fit(f, x, y_, method='lm', maxfev=5000)
    yfit_ = f(x_, *popt_)
    #print("problem")

fig, ax = plt.subplots(1, 1, figsize=(10, 8))
ax.plot(x, y, 'o', label='Actual Cases')
ax.plot(x_, yfit, '-', label='Fitted Cases')
ax.plot(x, y_, 'o', label='Actual Fatalities')
ax.plot(x_, yfit_, '-', label='Fitted fatalities')
ax.title.set_text('US - New York')
plt.legend(loc='center right')
plt.show()

unique = pd.DataFrame(train_.groupby(['Country_Region', 'Province_State'], as_index=False).count())

import datetime as dt

def date_day_diff(d1, d2):
    delta = dt.datetime.strptime(d1, '%Y-%m-%d') - dt.datetime.strptime(d2, '%Y-%m-%d')
    return delta.days

log_regions = []
for index, region in unique.iterrows():
    st = region['Province_State']
    co = region['Country_Region']
    rdata = train_[(train_['Province_State'] == st) & (train_['Country_Region'] == co)]
    t = rdata['Date'].values
    t = [float(date_day_diff(d, t[0])) for d in t]
    y = rdata['ConfirmedCases'].values
    y_ = rdata['Fatalities'].values
    p0 = [max(y), 0.0, max(t)]
    p0_ = [max(y_), 0.0, max(t)]
    try:
        popt, pcov = opt.curve_fit(logistic, t, y, p0, maxfev=10000)
        try:
            popt_, pcov_ = opt.curve_fit(logistic, t, y_, p0_, maxfev=10000)
        except:
            popt_, pcov_ = opt.curve_fit(f, t, y_, method='trf', maxfev=10000)
        log_regions.append((co, st, popt, popt_))
    except:
        popt, pcov = opt.curve_fit(f, t, y, method='trf', maxfev=10000)
        popt_, pcov_ = opt.curve_fit(f, t, y_, method='trf', maxfev=10000)
        log_regions.append((co, st, popt, popt_))

print('All done!')
code
32068517/cell_13
[ "text_plain_output_1.png" ]
PATH = '/kaggle/input/covid19-global-forecasting-week-4/'
train_df = pd.read_csv(PATH + 'train.csv', parse_dates=['Date'])
test_df = pd.read_csv(PATH + 'test.csv', parse_dates=['Date'])
add_datepart(train_df, 'Date', drop=False)
add_datepart(test_df, 'Date', drop=False)
train_df.shape
PATH1 = '/kaggle/input/covid19-country-data-wk3-release/'
meta_convert_fun = lambda x: np.float32(x) if x not in ['N.A.', '#N/A', '#NULL!'] else np.nan
meta_df = pd.read_csv(PATH1 + 'Data Join - RELEASE.csv', thousands=',', converters={' TFR ': meta_convert_fun, 'Personality_uai': meta_convert_fun}).rename(columns=lambda x: x.strip())
PATH2 = '/kaggle/input/countryinfo/'
countryinfo = pd.read_csv(PATH2 + 'covid19countryinfo.csv', thousands=',', parse_dates=['quarantine', 'schools', 'publicplace', 'gathering', 'nonessential'])
testinfo = pd.read_csv(PATH2 + 'covid19tests.csv', thousands=',')
countryinfo.rename(columns={'region': 'Province_State', 'country': 'Country_Region'}, inplace=True)
testinfo.rename(columns={'region': 'Province_State', 'country': 'Country_Region'}, inplace=True)
testinfo = testinfo.drop(['alpha3code', 'alpha2code', 'date'], axis=1)
PATH3 = '/kaggle/input/covid19-forecasting-metadata/'
continent_meta = pd.read_csv(PATH3 + 'region_metadata.csv').rename(columns={'density': 'pop_density'})
continent_meta = continent_meta[['Country_Region', 'Province_State', 'continent', 'lat', 'lon', 'pop_density']]
recoveries_meta = pd.read_csv(PATH3 + 'region_date_metadata.csv', parse_dates=['Date'])

def fill_unknown_state(df):
    df.fillna({'Province_State': 'Unknown'}, inplace=True)

for d in [train_df, test_df, meta_df, countryinfo, testinfo, continent_meta, recoveries_meta]:
    fill_unknown_state(d)

outliars = ['China']
out_inputs = {}
for out in outliars:
    out_inputs[out] = train_df[train_df['Country_Region'] == out]
    train_df.drop(train_df.index[train_df['Country_Region'] == out], inplace=True)

test_ori = test_df.copy()
merge_cols = ['Province_State', 'Country_Region', 'Date']
test_hlp = test_df[merge_cols + ['ForecastId']]
fst_date = test_hlp.Date.min()
outlier_dfs = []
for out, in_df in out_inputs.items():
    last_date = in_df.Date.max()
    merged = in_df[in_df['Date'] >= fst_date].merge(test_hlp, on=merge_cols, how='left')[['ForecastId', 'ConfirmedCases', 'Fatalities']]
    future_test = test_hlp[(test_hlp['Country_Region'] == out) & (test_hlp['Date'] > last_date)]
    to_add = in_df.groupby(['Province_State', 'Country_Region']).last().reset_index()[['Province_State', 'Country_Region', 'ConfirmedCases', 'Fatalities']]
    merged_future = future_test.merge(to_add, on=['Province_State', 'Country_Region'], how='left')[['ForecastId', 'ConfirmedCases', 'Fatalities']]
    merged = pd.concat([merged, merged_future], sort=True)
    test_df.drop(test_df[test_df['ForecastId'].isin(merged.ForecastId)].index, inplace=True)
    outlier_dfs.append(merged)

outlier_df = pd.concat(outlier_dfs, sort=True)
outlier_df.index = outlier_df['ForecastId']
outlier_df.drop('ForecastId', axis=1, inplace=True)
train_max_date = train_df.Date.max()
outlier_all = test_df[test_df['Date'] <= train_max_date].merge(train_df, on=merge_cols, how='left')[['ForecastId', 'ConfirmedCases', 'Fatalities']]
outlier_all.index = outlier_all.ForecastId
test_df.drop(test_df[test_df['ForecastId'].isin(outlier_all.ForecastId)].index, inplace=True)
outlier_all.drop('ForecastId', axis=1, inplace=True)
outlier_df = pd.concat([outlier_df, outlier_all], sort=True)
outlier_df
code
32068517/cell_20
[ "text_html_output_1.png" ]
PATH = '/kaggle/input/covid19-global-forecasting-week-4/'
train_df = pd.read_csv(PATH + 'train.csv', parse_dates=['Date'])
test_df = pd.read_csv(PATH + 'test.csv', parse_dates=['Date'])
add_datepart(train_df, 'Date', drop=False)
add_datepart(test_df, 'Date', drop=False)
train_df.shape
PATH1 = '/kaggle/input/covid19-country-data-wk3-release/'
meta_convert_fun = lambda x: np.float32(x) if x not in ['N.A.', '#N/A', '#NULL!'] else np.nan
meta_df = pd.read_csv(PATH1 + 'Data Join - RELEASE.csv', thousands=',', converters={' TFR ': meta_convert_fun, 'Personality_uai': meta_convert_fun}).rename(columns=lambda x: x.strip())
PATH2 = '/kaggle/input/countryinfo/'
countryinfo = pd.read_csv(PATH2 + 'covid19countryinfo.csv', thousands=',', parse_dates=['quarantine', 'schools', 'publicplace', 'gathering', 'nonessential'])
testinfo = pd.read_csv(PATH2 + 'covid19tests.csv', thousands=',')
countryinfo.rename(columns={'region': 'Province_State', 'country': 'Country_Region'}, inplace=True)
testinfo.rename(columns={'region': 'Province_State', 'country': 'Country_Region'}, inplace=True)
testinfo = testinfo.drop(['alpha3code', 'alpha2code', 'date'], axis=1)
PATH3 = '/kaggle/input/covid19-forecasting-metadata/'
continent_meta = pd.read_csv(PATH3 + 'region_metadata.csv').rename(columns={'density': 'pop_density'})
continent_meta = continent_meta[['Country_Region', 'Province_State', 'continent', 'lat', 'lon', 'pop_density']]
recoveries_meta = pd.read_csv(PATH3 + 'region_date_metadata.csv', parse_dates=['Date'])

def fill_unknown_state(df):
    df.fillna({'Province_State': 'Unknown'}, inplace=True)

for d in [train_df, test_df, meta_df, countryinfo, testinfo, continent_meta, recoveries_meta]:
    fill_unknown_state(d)

outliars = ['China']
out_inputs = {}
for out in outliars:
    out_inputs[out] = train_df[train_df['Country_Region'] == out]
    train_df.drop(train_df.index[train_df['Country_Region'] == out], inplace=True)

test_ori = test_df.copy()
merge_cols = ['Province_State', 'Country_Region', 'Date']
test_hlp = test_df[merge_cols + ['ForecastId']]
fst_date = test_hlp.Date.min()
outlier_dfs = []
for out, in_df in out_inputs.items():
    last_date = in_df.Date.max()
    merged = in_df[in_df['Date'] >= fst_date].merge(test_hlp, on=merge_cols, how='left')[['ForecastId', 'ConfirmedCases', 'Fatalities']]
    future_test = test_hlp[(test_hlp['Country_Region'] == out) & (test_hlp['Date'] > last_date)]
    to_add = in_df.groupby(['Province_State', 'Country_Region']).last().reset_index()[['Province_State', 'Country_Region', 'ConfirmedCases', 'Fatalities']]
    merged_future = future_test.merge(to_add, on=['Province_State', 'Country_Region'], how='left')[['ForecastId', 'ConfirmedCases', 'Fatalities']]
    merged = pd.concat([merged, merged_future], sort=True)
    test_df.drop(test_df[test_df['ForecastId'].isin(merged.ForecastId)].index, inplace=True)
    outlier_dfs.append(merged)

outlier_df = pd.concat(outlier_dfs, sort=True)
outlier_df.index = outlier_df['ForecastId']
outlier_df.drop('ForecastId', axis=1, inplace=True)
train_max_date = train_df.Date.max()
outlier_all = test_df[test_df['Date'] <= train_max_date].merge(train_df, on=merge_cols, how='left')[['ForecastId', 'ConfirmedCases', 'Fatalities']]
outlier_all.index = outlier_all.ForecastId
test_df.drop(test_df[test_df['ForecastId'].isin(outlier_all.ForecastId)].index, inplace=True)
outlier_all.drop('ForecastId', axis=1, inplace=True)
outlier_df = pd.concat([outlier_df, outlier_all], sort=True)
outlier_df
idx_group = ['Country_Region', 'Province_State']

def day_reached_cases(df, name, no_cases=1):
    """For each country/province get first day of year with at least given number of cases."""
    gb = df[df['ConfirmedCases'] >= no_cases].groupby(idx_group)
    return gb.Dayofyear.first().reset_index().rename(columns={'Dayofyear': name})

def area_fatality_rate(df):
    """Get average fatality rate for last known entry, for each country/province."""
    gb = df[df['Fatalities'] >= 22].groupby(idx_group)
    res_df = (gb.Fatalities.last() / gb.ConfirmedCases.last()).reset_index()
    return res_df.rename(columns={0: 'FatalityRate'})

def joined_data(df):
    res = df.copy()
    fatality = area_fatality_rate(train_df)
    first_nonzero = day_reached_cases(train_df, 'FirstCaseDay', 1)
    first_fifty = day_reached_cases(train_df, 'First50CasesDay', 50)
    res = pd.merge(res, continent_meta, how='left')
    res = pd.merge(res, recoveries_meta, how='left')
    res = pd.merge(res, meta_df, how='left')
    res = pd.merge(res, countryinfo, how='left')
    res = pd.merge(res, testinfo, how='left', left_on=idx_group, right_on=idx_group)
    res = pd.merge(res, fatality, how='left')
    res = pd.merge(res, first_nonzero, how='left')
    res = pd.merge(res, first_fifty, how='left')
    return res

train_df = joined_data(train_df)
test_df = joined_data(test_df)
train_df.FirstCaseDay.isna().sum()

def with_new_features(df):
    res = df.copy()
    add_datepart(res, 'quarantine', prefix='qua')
    add_datepart(res, 'schools', prefix='sql')
    res['DaysSinceFirst'] = res['Dayofyear'] - res['FirstCaseDay']
    res['DaysSince50'] = res['Dayofyear'] - res['First50CasesDay']
    res['DaysQua'] = res['Dayofyear'] - res['quaDayofyear']
    res['DaysSql'] = res['Dayofyear'] - res['sqlDayofyear']
    return res

train_df = with_new_features(train_df)
test_df = with_new_features(test_df)
train_df.shape
code
32068517/cell_2
[ "text_plain_output_1.png" ]
!pip install fastai2
!pip install fast_tabnet
code
32068517/cell_3
[ "text_plain_output_1.png" ]
import os
from fastai2.basics import *
from fastai2.tabular.all import *
from fast_tabnet.core import *
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32068517/cell_17
[ "text_plain_output_1.png" ]
PATH = '/kaggle/input/covid19-global-forecasting-week-4/'
train_df = pd.read_csv(PATH + 'train.csv', parse_dates=['Date'])
test_df = pd.read_csv(PATH + 'test.csv', parse_dates=['Date'])
add_datepart(train_df, 'Date', drop=False)
add_datepart(test_df, 'Date', drop=False)
train_df.shape
PATH1 = '/kaggle/input/covid19-country-data-wk3-release/'
meta_convert_fun = lambda x: np.float32(x) if x not in ['N.A.', '#N/A', '#NULL!'] else np.nan
meta_df = pd.read_csv(PATH1 + 'Data Join - RELEASE.csv', thousands=',', converters={' TFR ': meta_convert_fun, 'Personality_uai': meta_convert_fun}).rename(columns=lambda x: x.strip())
PATH2 = '/kaggle/input/countryinfo/'
countryinfo = pd.read_csv(PATH2 + 'covid19countryinfo.csv', thousands=',', parse_dates=['quarantine', 'schools', 'publicplace', 'gathering', 'nonessential'])
testinfo = pd.read_csv(PATH2 + 'covid19tests.csv', thousands=',')
countryinfo.rename(columns={'region': 'Province_State', 'country': 'Country_Region'}, inplace=True)
testinfo.rename(columns={'region': 'Province_State', 'country': 'Country_Region'}, inplace=True)
testinfo = testinfo.drop(['alpha3code', 'alpha2code', 'date'], axis=1)
PATH3 = '/kaggle/input/covid19-forecasting-metadata/'
continent_meta = pd.read_csv(PATH3 + 'region_metadata.csv').rename(columns={'density': 'pop_density'})
continent_meta = continent_meta[['Country_Region', 'Province_State', 'continent', 'lat', 'lon', 'pop_density']]
recoveries_meta = pd.read_csv(PATH3 + 'region_date_metadata.csv', parse_dates=['Date'])

def fill_unknown_state(df):
    df.fillna({'Province_State': 'Unknown'}, inplace=True)

for d in [train_df, test_df, meta_df, countryinfo, testinfo, continent_meta, recoveries_meta]:
    fill_unknown_state(d)

outliars = ['China']
out_inputs = {}
for out in outliars:
    out_inputs[out] = train_df[train_df['Country_Region'] == out]
    train_df.drop(train_df.index[train_df['Country_Region'] == out], inplace=True)

test_ori = test_df.copy()
merge_cols = ['Province_State', 'Country_Region', 'Date']
test_hlp = test_df[merge_cols + ['ForecastId']]
fst_date = test_hlp.Date.min()
outlier_dfs = []
for out, in_df in out_inputs.items():
    last_date = in_df.Date.max()
    merged = in_df[in_df['Date'] >= fst_date].merge(test_hlp, on=merge_cols, how='left')[['ForecastId', 'ConfirmedCases', 'Fatalities']]
    future_test = test_hlp[(test_hlp['Country_Region'] == out) & (test_hlp['Date'] > last_date)]
    to_add = in_df.groupby(['Province_State', 'Country_Region']).last().reset_index()[['Province_State', 'Country_Region', 'ConfirmedCases', 'Fatalities']]
    merged_future = future_test.merge(to_add, on=['Province_State', 'Country_Region'], how='left')[['ForecastId', 'ConfirmedCases', 'Fatalities']]
    merged = pd.concat([merged, merged_future], sort=True)
    test_df.drop(test_df[test_df['ForecastId'].isin(merged.ForecastId)].index, inplace=True)
    outlier_dfs.append(merged)

outlier_df = pd.concat(outlier_dfs, sort=True)
outlier_df.index = outlier_df['ForecastId']
outlier_df.drop('ForecastId', axis=1, inplace=True)
train_max_date = train_df.Date.max()
outlier_all = test_df[test_df['Date'] <= train_max_date].merge(train_df, on=merge_cols, how='left')[['ForecastId', 'ConfirmedCases', 'Fatalities']]
outlier_all.index = outlier_all.ForecastId
test_df.drop(test_df[test_df['ForecastId'].isin(outlier_all.ForecastId)].index, inplace=True)
outlier_all.drop('ForecastId', axis=1, inplace=True)
outlier_df = pd.concat([outlier_df, outlier_all], sort=True)
outlier_df
idx_group = ['Country_Region', 'Province_State']

def day_reached_cases(df, name, no_cases=1):
    """For each country/province get first day of year with at least given number of cases."""
    gb = df[df['ConfirmedCases'] >= no_cases].groupby(idx_group)
    return gb.Dayofyear.first().reset_index().rename(columns={'Dayofyear': name})

def area_fatality_rate(df):
    """Get average fatality rate for last known entry, for each country/province."""
    gb = df[df['Fatalities'] >= 22].groupby(idx_group)
    res_df = (gb.Fatalities.last() / gb.ConfirmedCases.last()).reset_index()
    return res_df.rename(columns={0: 'FatalityRate'})

def joined_data(df):
    res = df.copy()
    fatality = area_fatality_rate(train_df)
    first_nonzero = day_reached_cases(train_df, 'FirstCaseDay', 1)
    first_fifty = day_reached_cases(train_df, 'First50CasesDay', 50)
    res = pd.merge(res, continent_meta, how='left')
    res = pd.merge(res, recoveries_meta, how='left')
    res = pd.merge(res, meta_df, how='left')
    res = pd.merge(res, countryinfo, how='left')
    res = pd.merge(res, testinfo, how='left', left_on=idx_group, right_on=idx_group)
    res = pd.merge(res, fatality, how='left')
    res = pd.merge(res, first_nonzero, how='left')
    res = pd.merge(res, first_fifty, how='left')
    return res

train_df = joined_data(train_df)
test_df = joined_data(test_df)
train_df.FirstCaseDay.isna().sum()
code
32068517/cell_5
[ "text_html_output_1.png" ]
PATH = '/kaggle/input/covid19-global-forecasting-week-4/'
train_df = pd.read_csv(PATH + 'train.csv', parse_dates=['Date'])
test_df = pd.read_csv(PATH + 'test.csv', parse_dates=['Date'])
add_datepart(train_df, 'Date', drop=False)
add_datepart(test_df, 'Date', drop=False)
train_df.shape
code
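add_datepart in the cell above is fastai's tabular helper (brought into scope by the star imports in this notebook's setup cell); it expands a date column in place into calendar parts such as Year, Month, Dayofyear, and Elapsed, which is why later cells can group and filter on a Dayofyear column. A minimal sketch of the behavior, assuming the same fastai2 pre-release pinned by the pip cell below exposes it under this path:

import pandas as pd
from fastai2.tabular.all import add_datepart  # normally star-imported in the setup cell

df = pd.DataFrame({'Date': pd.to_datetime(['2020-03-01', '2020-03-02'])})
add_datepart(df, 'Date', drop=False)  # adds Year, Month, Day, Dayofyear, ..., Elapsed
print([c for c in df.columns if c != 'Date'])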
16164174/cell_9
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/train.csv')
df
simple_feature_cutting_df = df.drop(['PassengerId', 'Name', 'Ticket', 'Cabin', 'Embarked'], axis=1)
simple_feature_cutting_df = simple_feature_cutting_df.dropna()
simple_feature_cutting_df = pd.get_dummies(simple_feature_cutting_df, columns=['Sex'])
simple_feature_cutting_df
test_data_set = simple_feature_cutting_df[:100]
train_data_set = simple_feature_cutting_df[100:]
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
label_data = train_data_set['Survived']
train_data = train_data_set.drop('Survived', axis=1)
model.fit(train_data, label_data)
result_test_predict = model.predict(test_data_set.drop('Survived', axis=1))
real_test_observations = np.array(test_data_set['Survived'])
result = pd.DataFrame({'predict': result_test_predict, 'real': real_test_observations})
result['Correct'] = result.apply(lambda row: row['predict'] == row['real'], axis=1)
result
code
16164174/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/train.csv')
df
code
16164174/cell_6
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/train.csv')
df
simple_feature_cutting_df = df.drop(['PassengerId', 'Name', 'Ticket', 'Cabin', 'Embarked'], axis=1)
simple_feature_cutting_df = simple_feature_cutting_df.dropna()
simple_feature_cutting_df = pd.get_dummies(simple_feature_cutting_df, columns=['Sex'])
simple_feature_cutting_df
code
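pd.get_dummies with columns=['Sex'] replaces the Sex column with one indicator column per category, so the frame gains Sex_female and Sex_male. A tiny self-contained illustration:

import pandas as pd

tiny = pd.DataFrame({'Sex': ['male', 'female', 'male']})
print(pd.get_dummies(tiny, columns=['Sex']).columns.tolist())  # ['Sex_female', 'Sex_male']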
16164174/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

print(os.listdir('../input'))
code
16164174/cell_8
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/train.csv')
df
simple_feature_cutting_df = df.drop(['PassengerId', 'Name', 'Ticket', 'Cabin', 'Embarked'], axis=1)
simple_feature_cutting_df = simple_feature_cutting_df.dropna()
simple_feature_cutting_df = pd.get_dummies(simple_feature_cutting_df, columns=['Sex'])
simple_feature_cutting_df
test_data_set = simple_feature_cutting_df[:100]
train_data_set = simple_feature_cutting_df[100:]
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
label_data = train_data_set['Survived']
train_data = train_data_set.drop('Survived', axis=1)
model.fit(train_data, label_data)
code
16164174/cell_10
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/train.csv')
df
simple_feature_cutting_df = df.drop(['PassengerId', 'Name', 'Ticket', 'Cabin', 'Embarked'], axis=1)
simple_feature_cutting_df = simple_feature_cutting_df.dropna()
simple_feature_cutting_df = pd.get_dummies(simple_feature_cutting_df, columns=['Sex'])
simple_feature_cutting_df
test_data_set = simple_feature_cutting_df[:100]
train_data_set = simple_feature_cutting_df[100:]
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
label_data = train_data_set['Survived']
train_data = train_data_set.drop('Survived', axis=1)
model.fit(train_data, label_data)
result_test_predict = model.predict(test_data_set.drop('Survived', axis=1))
real_test_observations = np.array(test_data_set['Survived'])
result = pd.DataFrame({'predict': result_test_predict, 'real': real_test_observations})
result['Correct'] = result.apply(lambda row: row['predict'] == row['real'], axis=1)
result
num_correct = len(result[result['Correct']])
num_total = len(result)
num_correct / num_total
code
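The manual accuracy in the cell above (num_correct / num_total) is what scikit-learn classifiers compute directly via score. An equivalent one-liner, assuming the model and test_data_set from that cell:

# Mean accuracy on the held-out rows; should match num_correct / num_total above.
acc = model.score(test_data_set.drop('Survived', axis=1), test_data_set['Survived'])
print(acc)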
16164174/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/train.csv')
df
import matplotlib.pyplot as plt
df['Age'].hist(bins=20)
code
128027681/cell_4
[ "image_output_5.png", "image_output_4.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import os
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        if filename == '2018.csv':
            data_path = os.path.join(dirname, filename)
            data = pd.read_csv(data_path)

from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score

X = data[['GDP per capita', 'Social support', 'Healthy life expectancy', 'Freedom to make life choices', 'Generosity', 'Perceptions of corruption']]
y = data['Score']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
coeff_df = pd.DataFrame(model.coef_, X.columns, columns=['Coefficient'])

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        if filename == 'report_2018-2019.csv':
            data_path = os.path.join(dirname, filename)
            data = pd.read_csv(data_path)

X = data.drop(['Overall rank', 'Country or region', 'Score', 'Year'], axis=1)
y = data['Score']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
reg = LinearRegression()
reg.fit(X_train, y_train)
y_pred = reg.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
coefficients = pd.DataFrame(reg.coef_, X.columns, columns=['Coefficient'])
plt.figure(figsize=(8, 6))
sns.histplot(data=data, x='Score', kde=True)
plt.title('Distribution of Happiness Scores')
plt.xlabel('Happiness Score')
plt.ylabel('Frequency')
plt.show()
yearly_data = data.groupby('Year').mean()
yearly_data.reset_index(level=0, inplace=True)
yearly_data.plot(x='Year', y=['Score', 'GDP per capita', 'Social support', 'Healthy life expectancy', 'Freedom to make life choices', 'Generosity', 'Perceptions of corruption'], kind='bar', subplots=True, layout=(4, 2), figsize=(15, 15), legend=False)
plt.tight_layout()
plt.show()
top_10_2019 = data[data['Year'] == 2019].sort_values('Score', ascending=False).head(10)
print('Top 10 countries by happiness score in 2019:\n', top_10_2019['Country or region'])
bottom_10_2019 = data[data['Year'] == 2019].sort_values('Score', ascending=True).head(10)
print('Bottom 10 countries by happiness score in 2019:\n', bottom_10_2019['Country or region'])
top_gdp_2019 = data[data['Year'] == 2019].sort_values('GDP per capita', ascending=False).head(5)
top_social_2019 = data[data['Year'] == 2019].sort_values('Social support', ascending=False).head(5)
top_health_2019 = data[data['Year'] == 2019].sort_values('Healthy life expectancy', ascending=False).head(5)
print('Top 5 countries by GDP per capita in 2019:\n', top_gdp_2019['Country or region'])
print('Top 5 countries by Social support in 2019:\n', top_social_2019['Country or region'])
print('Top 5 countries by Healthy life expectancy in 2019:\n', top_health_2019['Country or region'])
plt.figure(figsize=(8, 6))
sns.scatterplot(data=data, x='GDP per capita', y='Score', hue='Year', style='Year')
plt.title('GDP per Capita vs Happiness Score')
plt.xlabel('GDP per Capita')
plt.ylabel('Happiness Score')
plt.show()
plt.figure(figsize=(8, 6))
sns.scatterplot(data=data, x='Social support', y='Score', hue='Year', style='Year')
plt.title('Social Support vs Happiness Score')
plt.xlabel('Social Support')
plt.ylabel('Happiness Score')
plt.show()
plt.figure(figsize=(8, 6))
sns.scatterplot(data=data, x='Healthy life expectancy', y='Score', hue='Year', style='Year')
plt.title('Healthy Life Expectancy vs Happiness Score')
plt.xlabel('Healthy Life Expectancy')
plt.ylabel('Happiness Score')
plt.show()
code
128027681/cell_1
[ "text_plain_output_4.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import os import pandas as pd import seaborn as sns import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: if filename == '2018.csv': data_path = os.path.join(dirname, filename) data = pd.read_csv(data_path) print('Dataset shape:', data.shape) print('\nDataset info:') print(data.info()) print('\nDataset description:') print(data.describe()) plt.figure(figsize=(10, 8)) sns.heatmap(data.corr(), annot=True, cmap='coolwarm') plt.title('Correlation Matrix') plt.show() sns.pairplot(data) plt.show() from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error, r2_score X = data[['GDP per capita', 'Social support', 'Healthy life expectancy', 'Freedom to make life choices', 'Generosity', 'Perceptions of corruption']] y = data['Score'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) model = LinearRegression() model.fit(X_train, y_train) y_pred = model.predict(X_test) mse = mean_squared_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) print('Mean Squared Error:', mse) print('R-squared:', r2) coeff_df = pd.DataFrame(model.coef_, X.columns, columns=['Coefficient']) print('\nCoefficients:') print(coeff_df)
code
128027681/cell_3
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, r2_score from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import train_test_split from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import matplotlib.pyplot as plt import os import pandas as pd import pandas as pd import seaborn as sns import seaborn as sns import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: if filename == '2018.csv': data_path = os.path.join(dirname, filename) data = pd.read_csv(data_path) from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error, r2_score X = data[['GDP per capita', 'Social support', 'Healthy life expectancy', 'Freedom to make life choices', 'Generosity', 'Perceptions of corruption']] y = data['Score'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) model = LinearRegression() model.fit(X_train, y_train) y_pred = model.predict(X_test) mse = mean_squared_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) coeff_df = pd.DataFrame(model.coef_, X.columns, columns=['Coefficient']) import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, r2_score for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: if filename == 'report_2018-2019.csv': data_path = os.path.join(dirname, filename) data = pd.read_csv(data_path) print(data.head()) print(data.info()) print(data.describe()) plt.figure(figsize=(10, 8)) sns.heatmap(data.corr(), annot=True, cmap='coolwarm') X = data.drop(['Overall rank', 'Country or region', 'Score', 'Year'], axis=1) y = data['Score'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) reg = LinearRegression() reg.fit(X_train, y_train) y_pred = reg.predict(X_test) mse = mean_squared_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) print('Mean Squared Error:', mse) print('R-squared:', r2) coefficients = pd.DataFrame(reg.coef_, X.columns, columns=['Coefficient']) print(coefficients)
code
17120125/cell_42
[ "text_plain_output_1.png" ]
import cv2 import numpy as np import pandas as pd import pickle import tensorflow as tf IMG_SIZE = 512 import numpy as np import pandas as pd import tensorflow as tf import matplotlib.pyplot as plt import cv2 import os train = pd.read_csv('../input/aptos2019-blindness-detection/train.csv') test = pd.read_csv('../input/aptos2019-blindness-detection/test.csv') import pickle pickle_in_train_x = open('../input/preprocessed-data-aptos2019blindnessdetection/train_x_aptos2019-blindness-detection.pickle', 'rb') pickle_in_train_y = open('../input/preprocessed-data-aptos2019blindnessdetection/train_y_aptos2019-blindness-detection.pickle', 'rb') pickle_in_test_x = open('../input/preprocessed-data-aptos2019blindnessdetection/test_x_aptos2019-blindness-detection.pickle', 'rb') train_x = pickle.load(pickle_in_train_x) train_y = pickle.load(pickle_in_train_y) test_x = pickle.load(pickle_in_test_x) """ def crop_image1(img,tol=7): # img is image data # tol is tolerance mask = img>tol return img[np.ix_(mask.any(1),mask.any(0))] """ def crop_image_from_gray(img, tol=7): if img.ndim == 2: mask = img > tol return img[np.ix_(mask.any(1), mask.any(0))] elif img.ndim == 3: gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) mask = gray_img > tol check_shape = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))].shape[0] if check_shape == 0: return img else: img1 = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))] img2 = img[:, :, 1][np.ix_(mask.any(1), mask.any(0))] img3 = img[:, :, 2][np.ix_(mask.any(1), mask.any(0))] img = np.stack([img1, img2, img3], axis=-1) return img def load_ben_color(data, img_size, sigmaX=10): if data.ndim == 4: for i in range(len(data)): image = cv2.cvtColor(data[i], cv2.COLOR_BGR2RGB) image = crop_image_from_gray(image) image = cv2.resize(image, (img_size, img_size)) data[i] = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0, 0), sigmaX), -4, 128) elif data.ndim == 3: data = cv2.cvtColor(data, cv2.COLOR_BGR2RGB) data = crop_image_from_gray(data) data = cv2.resize(data, (img_size, img_size)) data = cv2.addWeighted(data, 4, cv2.GaussianBlur(data, (0, 0), sigmaX), -4, 128) else: return 0 return data train_x = load_ben_color(train_x, IMG_SIZE, sigmaX=10) train_x = train_x.astype('float32') def create_model_1(): layers_1 = [tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation=tf.nn.relu, input_shape=train_x.shape[1:]), tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)), tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation=tf.nn.relu), tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)), tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation=tf.nn.relu), tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)), tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation=tf.nn.relu), tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)), tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation=tf.nn.relu), tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)), tf.keras.layers.Flatten(), tf.keras.layers.Dense(units=512, activation=tf.nn.relu), tf.keras.layers.Dense(units=256, activation=tf.nn.relu), tf.keras.layers.Dense(units=len(np.unique(train_y)), activation=tf.nn.softmax)] model_1 = tf.keras.Sequential(layers_1) model_1.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy']) return model_1 model_1 = create_model_1() model_1.summary() test_predicted = model_1.predict(test_x) test_predicted = [np.argmax(i) for i in test_predicted] test_result = pd.DataFrame({'id_code': test['id_code'].values, 'diagnosis': test_predicted}) test_result.head()
code
17120125/cell_21
[ "text_plain_output_1.png" ]
import cv2 import matplotlib.pyplot as plt import numpy as np import pickle IMG_SIZE = 512 import pickle pickle_in_train_x = open('../input/preprocessed-data-aptos2019blindnessdetection/train_x_aptos2019-blindness-detection.pickle', 'rb') pickle_in_train_y = open('../input/preprocessed-data-aptos2019blindnessdetection/train_y_aptos2019-blindness-detection.pickle', 'rb') pickle_in_test_x = open('../input/preprocessed-data-aptos2019blindnessdetection/test_x_aptos2019-blindness-detection.pickle', 'rb') train_x = pickle.load(pickle_in_train_x) train_y = pickle.load(pickle_in_train_y) test_x = pickle.load(pickle_in_test_x) """ def crop_image1(img,tol=7): # img is image data # tol is tolerance mask = img>tol return img[np.ix_(mask.any(1),mask.any(0))] """ def crop_image_from_gray(img, tol=7): if img.ndim == 2: mask = img > tol return img[np.ix_(mask.any(1), mask.any(0))] elif img.ndim == 3: gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) mask = gray_img > tol check_shape = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))].shape[0] if check_shape == 0: return img else: img1 = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))] img2 = img[:, :, 1][np.ix_(mask.any(1), mask.any(0))] img3 = img[:, :, 2][np.ix_(mask.any(1), mask.any(0))] img = np.stack([img1, img2, img3], axis=-1) return img def load_ben_color(data, img_size, sigmaX=10): if data.ndim == 4: for i in range(len(data)): image = cv2.cvtColor(data[i], cv2.COLOR_BGR2RGB) image = crop_image_from_gray(image) image = cv2.resize(image, (img_size, img_size)) data[i] = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0, 0), sigmaX), -4, 128) elif data.ndim == 3: data = cv2.cvtColor(data, cv2.COLOR_BGR2RGB) data = crop_image_from_gray(data) data = cv2.resize(data, (img_size, img_size)) data = cv2.addWeighted(data, 4, cv2.GaussianBlur(data, (0, 0), sigmaX), -4, 128) else: return 0 return data train_x = load_ben_color(train_x, IMG_SIZE, sigmaX=10) train_x = train_x.astype('float32') plt.imshow(train_x[0]) plt.show()
code
17120125/cell_34
[ "text_plain_output_1.png" ]
""" # https://www.youtube.com/watch?v=HxtBIwfy0kM checkpoint_path = 'cp_model_1_aptos2019-blindness-detection.ckpt' checkpoint_dir = os.path.dirname(checkpoint_path) # Create checkpoint callback cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1) model_1 = create_model_1() model_1.fit(train_x, train_y, epochs=5, batch_size=32, callbacks=[cp_callback]) # pass calback to training """
code
17120125/cell_26
[ "text_plain_output_1.png" ]
""" pickle_out_train_x = open('train_x_aptos2019-blindness-detection.pickle', 'wb') pickle.dump(x, pickle_out_train_x) pickle_out_train_x.close() pickle_out_train_y = open('train_y_aptos2019-blindness-detection.pickle', 'wb') pickle.dump(y, pickle_out_train_y) pickle_out_train_y.close() pickle_out_test_x = open('test_x_aptos2019-blindness-detection.pickle', 'wb') pickle.dump(test_x, pickle_out_test_x) pickle_out_test_x.close() """
code
17120125/cell_11
[ "text_plain_output_1.png" ]
import pickle import pickle pickle_in_train_x = open('../input/preprocessed-data-aptos2019blindnessdetection/train_x_aptos2019-blindness-detection.pickle', 'rb') pickle_in_train_y = open('../input/preprocessed-data-aptos2019blindnessdetection/train_y_aptos2019-blindness-detection.pickle', 'rb') pickle_in_test_x = open('../input/preprocessed-data-aptos2019blindnessdetection/test_x_aptos2019-blindness-detection.pickle', 'rb') train_x = pickle.load(pickle_in_train_x) train_y = pickle.load(pickle_in_train_y) test_x = pickle.load(pickle_in_test_x) print(train_x.shape, train_y.shape, test_x.shape)
code
17120125/cell_28
[ "text_plain_output_1.png" ]
""" pickle_in_train_x = open('../input/preprocessed-data-aptos2019blindnessdetection/train_x_aptos2019-blindness-detection.pickle', 'rb') pickle_in_train_y = open('../input/preprocessed-data-aptos2019blindnessdetection/train_y_aptos2019-blindness-detection.pickle', 'rb') pickle_in_test_x = open('../input/preprocessed-data-aptos2019blindnessdetection/test_x_aptos2019-blindness-detection.pickle', 'rb') train_x = pickle.load(pickle_in_train_x) train_y = pickle.load(pickle_in_train_y) test_x = pickle.load(pickle_in_test_x) print(train_x.shape, train_y.shape, test_x.shape) """
code
17120125/cell_17
[ "text_plain_output_1.png" ]
""" n = 10 cols = 5 rows = np.ceil(n/cols) fig = plt.gcf() fig.set_size_inches(cols * n, rows * n) for i in range(n): plt.subplot(rows, cols, i+1) plt.imshow(train_x[i]) plt.title(train['diagnosis'][i], fontsize=40) plt.axis('off') """
code
17120125/cell_35
[ "text_plain_output_1.png" ]
""" train_predicted = model_1.predict(train_x) train_predicted = [np.argmax(i) for i in train_predicted] from sklearn.metrics import cohen_kappa_score cohen_kappa_score(train_predicted, train_y, weights='quadratic') """
code
17120125/cell_31
[ "text_plain_output_1.png" ]
import cv2 import numpy as np import pickle import tensorflow as tf IMG_SIZE = 512 import numpy as np import pandas as pd import tensorflow as tf import matplotlib.pyplot as plt import cv2 import os import pickle pickle_in_train_x = open('../input/preprocessed-data-aptos2019blindnessdetection/train_x_aptos2019-blindness-detection.pickle', 'rb') pickle_in_train_y = open('../input/preprocessed-data-aptos2019blindnessdetection/train_y_aptos2019-blindness-detection.pickle', 'rb') pickle_in_test_x = open('../input/preprocessed-data-aptos2019blindnessdetection/test_x_aptos2019-blindness-detection.pickle', 'rb') train_x = pickle.load(pickle_in_train_x) train_y = pickle.load(pickle_in_train_y) test_x = pickle.load(pickle_in_test_x) """ def crop_image1(img,tol=7): # img is image data # tol is tolerance mask = img>tol return img[np.ix_(mask.any(1),mask.any(0))] """ def crop_image_from_gray(img, tol=7): if img.ndim == 2: mask = img > tol return img[np.ix_(mask.any(1), mask.any(0))] elif img.ndim == 3: gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) mask = gray_img > tol check_shape = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))].shape[0] if check_shape == 0: return img else: img1 = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))] img2 = img[:, :, 1][np.ix_(mask.any(1), mask.any(0))] img3 = img[:, :, 2][np.ix_(mask.any(1), mask.any(0))] img = np.stack([img1, img2, img3], axis=-1) return img def load_ben_color(data, img_size, sigmaX=10): if data.ndim == 4: for i in range(len(data)): image = cv2.cvtColor(data[i], cv2.COLOR_BGR2RGB) image = crop_image_from_gray(image) image = cv2.resize(image, (img_size, img_size)) data[i] = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0, 0), sigmaX), -4, 128) elif data.ndim == 3: data = cv2.cvtColor(data, cv2.COLOR_BGR2RGB) data = crop_image_from_gray(data) data = cv2.resize(data, (img_size, img_size)) data = cv2.addWeighted(data, 4, cv2.GaussianBlur(data, (0, 0), sigmaX), -4, 128) else: return 0 return data train_x = load_ben_color(train_x, IMG_SIZE, sigmaX=10) train_x = train_x.astype('float32') def create_model_1(): layers_1 = [tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation=tf.nn.relu, input_shape=train_x.shape[1:]), tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)), tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation=tf.nn.relu), tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)), tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation=tf.nn.relu), tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)), tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation=tf.nn.relu), tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)), tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation=tf.nn.relu), tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)), tf.keras.layers.Flatten(), tf.keras.layers.Dense(units=512, activation=tf.nn.relu), tf.keras.layers.Dense(units=256, activation=tf.nn.relu), tf.keras.layers.Dense(units=len(np.unique(train_y)), activation=tf.nn.softmax)] model_1 = tf.keras.Sequential(layers_1) model_1.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy']) return model_1 model_1 = create_model_1() model_1.summary()
code
17120125/cell_22
[ "image_output_1.png" ]
import cv2 import numpy as np import pickle IMG_SIZE = 512 import pickle pickle_in_train_x = open('../input/preprocessed-data-aptos2019blindnessdetection/train_x_aptos2019-blindness-detection.pickle', 'rb') pickle_in_train_y = open('../input/preprocessed-data-aptos2019blindnessdetection/train_y_aptos2019-blindness-detection.pickle', 'rb') pickle_in_test_x = open('../input/preprocessed-data-aptos2019blindnessdetection/test_x_aptos2019-blindness-detection.pickle', 'rb') train_x = pickle.load(pickle_in_train_x) train_y = pickle.load(pickle_in_train_y) test_x = pickle.load(pickle_in_test_x) """ def crop_image1(img,tol=7): # img is image data # tol is tolerance mask = img>tol return img[np.ix_(mask.any(1),mask.any(0))] """ def crop_image_from_gray(img, tol=7): if img.ndim == 2: mask = img > tol return img[np.ix_(mask.any(1), mask.any(0))] elif img.ndim == 3: gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) mask = gray_img > tol check_shape = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))].shape[0] if check_shape == 0: return img else: img1 = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))] img2 = img[:, :, 1][np.ix_(mask.any(1), mask.any(0))] img3 = img[:, :, 2][np.ix_(mask.any(1), mask.any(0))] img = np.stack([img1, img2, img3], axis=-1) return img def load_ben_color(data, img_size, sigmaX=10): if data.ndim == 4: for i in range(len(data)): image = cv2.cvtColor(data[i], cv2.COLOR_BGR2RGB) image = crop_image_from_gray(image) image = cv2.resize(image, (img_size, img_size)) data[i] = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0, 0), sigmaX), -4, 128) elif data.ndim == 3: data = cv2.cvtColor(data, cv2.COLOR_BGR2RGB) data = crop_image_from_gray(data) data = cv2.resize(data, (img_size, img_size)) data = cv2.addWeighted(data, 4, cv2.GaussianBlur(data, (0, 0), sigmaX), -4, 128) else: return 0 return data train_x = load_ben_color(train_x, IMG_SIZE, sigmaX=10) train_x = train_x.astype('float32') train_x[0]
code
17120125/cell_10
[ "text_plain_output_1.png" ]
""" n = 10 cols = 5 rows = np.ceil(n/cols) fig = plt.gcf() fig.set_size_inches(cols * n, rows * n) for i in range(n): plt.subplot(rows, cols, i+1) plt.imshow(train_x[i]) plt.title(train['diagnosis'][i], fontsize=40) plt.axis('off') """
code
17120125/cell_37
[ "text_plain_output_1.png" ]
""" Memory error here # https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image/ImageDataGenerator datagen = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1./255, rotation_range=30, brightness_range=[0.5, 1.5], zoom_range=[0.8, 1.2], horizontal_flip=True, vertical_flip=False) datagen.fit(train_x) checkpoint_path = 'cp_model_1_aptos2019-blindness-detection.ckpt' checkpoint_dir = os.path.dirname(checkpoint_path) # Create checkpoint callback cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1) model_1 = create_model_1() # fits the model on batches with real-time data augmentation: model_1.fit_generator(datagen.flow(train_x, train_y, batch_size=32), steps_per_epoch=len(train_x) / 32, epochs=5, callbacks=[cp_callback]) """
code
17120125/cell_5
[ "text_html_output_1.png" ]
import tensorflow as tf import numpy as np import pandas as pd import tensorflow as tf import matplotlib.pyplot as plt import cv2 import os print('Tensorflow version:', tf.__version__)
code
1005795/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd dataset = pd.read_csv('../input/911.csv') dataset['twp'].value_counts().head(5)
code
1005795/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd dataset = pd.read_csv('../input/911.csv') dataset[dataset['Category'] == 'Traffic']['Sub-Category'].value_counts().head(6)
code
1005795/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd dataset = pd.read_csv('../input/911.csv') dataset['Category'].value_counts()
code
1005795/cell_25
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns dataset = pd.read_csv('../input/911.csv') plt.title('LOWER MERION Vehicle Accidents by timzone') sns.countplot('timezone', data=dataset[(dataset['twp'] == 'LOWER MERION') & (dataset['Sub-Category'] == ' VEHICLE ACCIDENT')])
code
1005795/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd dataset = pd.read_csv('../input/911.csv') dataset.info()
code
1005795/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd dataset = pd.read_csv('../input/911.csv') dataset[(dataset['twp'] == 'LOWER MERION') & (dataset['Category'] == 'Traffic')]['Sub-Category'].value_counts()
code
1005795/cell_30
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd dataset = pd.read_csv('../input/911.csv') dataset[(dataset['twp'] == 'LEHIGH COUNTY') & (dataset['Category'] == 'EMS')]['Sub-Category'].value_counts()
code
1005795/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd dataset = pd.read_csv('../input/911.csv') dataset['twp'].nunique()
code
1005795/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd dataset = pd.read_csv('../input/911.csv') dataset['title'].value_counts().head(5)
code
1005795/cell_29
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns dataset = pd.read_csv('../input/911.csv') sns.countplot('Category', data=dataset[dataset['twp'] == 'LEHIGH COUNTY'])
code
1005795/cell_26
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns dataset = pd.read_csv('../input/911.csv') plt.title('LOWER MERION Vehicle Accidents by month') sns.countplot('Month', data=dataset[(dataset['twp'] == 'LOWER MERION') & (dataset['Sub-Category'] == ' VEHICLE ACCIDENT')])
code
1005795/cell_11
[ "text_html_output_1.png" ]
import pandas as pd dataset = pd.read_csv('../input/911.csv') dataset[dataset['Category'] == 'EMS']['Sub-Category'].value_counts().head(6)
code
1005795/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd dataset = pd.read_csv('../input/911.csv') dataset['dayofweek'].value_counts()
code
1005795/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns dataset = pd.read_csv('../input/911.csv') sns.countplot('dayofweek', data=dataset)
code
1005795/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd dataset = pd.read_csv('../input/911.csv') dataset['twp'].value_counts(ascending=True).head(5)
code
1005795/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import seaborn as sns dataset = pd.read_csv('../input/911.csv') sns.countplot('Category', data=dataset)
code
1005795/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns dataset = pd.read_csv('../input/911.csv') sns.countplot('timezone', data=dataset)
code
1005795/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns dataset = pd.read_csv('../input/911.csv') plt.title('LOWER MERION incidents by Category') sns.countplot('Category', data=dataset[dataset['twp'] == 'LOWER MERION'])
code
1005795/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd dataset = pd.read_csv('../input/911.csv') dataset['title'].nunique()
code
1005795/cell_27
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns dataset = pd.read_csv('../input/911.csv') plt.title('Overall Vehicle Accidents by month') sns.countplot('Month', data=dataset[dataset['Sub-Category'] == ' VEHICLE ACCIDENT'])
code
1005795/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd dataset = pd.read_csv('../input/911.csv') dataset[dataset['Category'] == 'Fire']['Sub-Category'].value_counts().head(6)
code
1005795/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd dataset = pd.read_csv('../input/911.csv') dataset.head(5)
code
130000822/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import classification_report, roc_auc_score, roc_curve,confusion_matrix from sklearn.metrics import roc_curve, auc, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score from sklearn.model_selection import train_test_split from tensorflow import keras import cv2 import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np import os import seaborn as sns import tensorflow as tf import cv2 import numpy as np import os import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt import matplotlib.image as mpimg from sklearn.metrics import roc_curve, auc, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, roc_auc_score, roc_curve, confusion_matrix import seaborn as sns EPOCHS = 10 IMG_WIDTH = 30 IMG_HEIGHT = 30 NUM_CATEGORIES = 43 TEST_SIZE = 0.4 data_dir = '/kaggle/input/gstrb-dataset/gtsrb' IMAGES = [] LABELS = [] for folder in os.listdir(data_dir): folder_path = os.path.join(data_dir, folder) if os.path.isdir(folder_path): for image_file in os.listdir(folder_path): image = cv2.imread(os.path.join(folder_path, image_file), cv2.IMREAD_ANYCOLOR) image = cv2.resize(image, (IMG_WIDTH, IMG_HEIGHT), interpolation=cv2.INTER_AREA) IMAGES.append(image) LABELS.append(int(folder)) image = mpimg.imread('/kaggle/input/gstrb-dataset/gtsrb/0/00000_00004.ppm') plt.axis('off') def get_model(): model = tf.keras.models.Sequential([tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)), tf.keras.layers.MaxPooling2D(pool_size=(3, 3)), tf.keras.layers.Conv2D(64, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dropout(0.33), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.33), tf.keras.layers.Dense(NUM_CATEGORIES, activation='softmax')]) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) return model images, labels = (IMAGES, LABELS) labels = tf.keras.utils.to_categorical(labels) x_train, x_test, y_train, y_test = train_test_split(np.array(images), np.array(labels), test_size=TEST_SIZE) model = get_model() fitting = model.fit(x_train, y_train, epochs=EPOCHS, validation_data=(x_test, y_test)) model_evaluation = model.evaluate(x_test, y_test, verbose=2) filename = 'trained model.h5' model.save(filename) def view(history): pass view(fitting) def way2(history): pass way2(fitting) y_pred_proba = model.predict(x_test) y_true = np.argmax(y_test, axis=1) classification_rep = classification_report(y_true, np.argmax(y_pred_proba, axis=1)) auc_scores = [] for class_index in range(NUM_CATEGORIES): class_true = np.where(y_true == class_index, 1, 0) class_pred = y_pred_proba[:, class_index] auc_scores.append(roc_auc_score(class_true, class_pred)) for class_index in range(NUM_CATEGORIES): class_true = np.where(y_true == class_index, 1, 0) class_pred = y_pred_proba[:, class_index] fpr, tpr, _ = roc_curve(class_true, class_pred) auc_score = auc_scores[class_index] y_pred = model.predict(x_test) y_pred_labels = np.argmax(y_pred, axis=1) y_true_labels = np.argmax(keras.utils.to_categorical(y_true), axis=1) cm = confusion_matrix(y_true_labels, y_pred_labels) plt.figure(figsize=(12, 10)) sns.heatmap(cm, annot=True, fmt='d', cmap='Blues') plt.xlabel('Predicted Labels') plt.ylabel('True Labels') plt.title('Confusion Matrix') plt.show()
code
130000822/cell_6
[ "image_output_2.png", "image_output_1.png" ]
from sklearn.model_selection import train_test_split import cv2 import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np import os import tensorflow as tf import cv2 import numpy as np import os import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt import matplotlib.image as mpimg from sklearn.metrics import roc_curve, auc, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, roc_auc_score, roc_curve, confusion_matrix import seaborn as sns EPOCHS = 10 IMG_WIDTH = 30 IMG_HEIGHT = 30 NUM_CATEGORIES = 43 TEST_SIZE = 0.4 data_dir = '/kaggle/input/gstrb-dataset/gtsrb' IMAGES = [] LABELS = [] for folder in os.listdir(data_dir): folder_path = os.path.join(data_dir, folder) if os.path.isdir(folder_path): for image_file in os.listdir(folder_path): image = cv2.imread(os.path.join(folder_path, image_file), cv2.IMREAD_ANYCOLOR) image = cv2.resize(image, (IMG_WIDTH, IMG_HEIGHT), interpolation=cv2.INTER_AREA) IMAGES.append(image) LABELS.append(int(folder)) image = mpimg.imread('/kaggle/input/gstrb-dataset/gtsrb/0/00000_00004.ppm') plt.axis('off') def get_model(): model = tf.keras.models.Sequential([tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)), tf.keras.layers.MaxPooling2D(pool_size=(3, 3)), tf.keras.layers.Conv2D(64, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dropout(0.33), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.33), tf.keras.layers.Dense(NUM_CATEGORIES, activation='softmax')]) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) return model images, labels = (IMAGES, LABELS) labels = tf.keras.utils.to_categorical(labels) x_train, x_test, y_train, y_test = train_test_split(np.array(images), np.array(labels), test_size=TEST_SIZE) model = get_model() fitting = model.fit(x_train, y_train, epochs=EPOCHS, validation_data=(x_test, y_test)) model_evaluation = model.evaluate(x_test, y_test, verbose=2) filename = 'trained model.h5' model.save(filename) def view(history): plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('Model Accuracy') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper left') plt.show() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('Model Loss') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper right') plt.show() view(fitting)
code
130000822/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import cv2 import numpy as np import os import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt import matplotlib.image as mpimg from sklearn.metrics import roc_curve, auc, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, roc_auc_score, roc_curve, confusion_matrix import seaborn as sns EPOCHS = 10 IMG_WIDTH = 30 IMG_HEIGHT = 30 NUM_CATEGORIES = 43 TEST_SIZE = 0.4
code
130000822/cell_7
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split import cv2 import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np import os import tensorflow as tf import cv2 import numpy as np import os import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt import matplotlib.image as mpimg from sklearn.metrics import roc_curve, auc, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, roc_auc_score, roc_curve, confusion_matrix import seaborn as sns EPOCHS = 10 IMG_WIDTH = 30 IMG_HEIGHT = 30 NUM_CATEGORIES = 43 TEST_SIZE = 0.4 data_dir = '/kaggle/input/gstrb-dataset/gtsrb' IMAGES = [] LABELS = [] for folder in os.listdir(data_dir): folder_path = os.path.join(data_dir, folder) if os.path.isdir(folder_path): for image_file in os.listdir(folder_path): image = cv2.imread(os.path.join(folder_path, image_file), cv2.IMREAD_ANYCOLOR) image = cv2.resize(image, (IMG_WIDTH, IMG_HEIGHT), interpolation=cv2.INTER_AREA) IMAGES.append(image) LABELS.append(int(folder)) image = mpimg.imread('/kaggle/input/gstrb-dataset/gtsrb/0/00000_00004.ppm') plt.axis('off') def get_model(): model = tf.keras.models.Sequential([tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)), tf.keras.layers.MaxPooling2D(pool_size=(3, 3)), tf.keras.layers.Conv2D(64, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dropout(0.33), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.33), tf.keras.layers.Dense(NUM_CATEGORIES, activation='softmax')]) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) return model images, labels = (IMAGES, LABELS) labels = tf.keras.utils.to_categorical(labels) x_train, x_test, y_train, y_test = train_test_split(np.array(images), np.array(labels), test_size=TEST_SIZE) model = get_model() fitting = model.fit(x_train, y_train, epochs=EPOCHS, validation_data=(x_test, y_test)) model_evaluation = model.evaluate(x_test, y_test, verbose=2) filename = 'trained model.h5' model.save(filename) def view(history): pass view(fitting) def way2(history): plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('Model Accuracy and Loss') plt.ylabel('Accuracy/Loss') plt.xlabel('Epoch') plt.legend(['Train Accuracy', 'Test Accuracy', 'Train Loss', 'Test Loss'], loc='upper left') plt.show() way2(fitting)
code
130000822/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import classification_report, roc_auc_score, roc_curve,confusion_matrix from sklearn.metrics import roc_curve, auc, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score from sklearn.model_selection import train_test_split import cv2 import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np import os import tensorflow as tf import cv2 import numpy as np import os import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt import matplotlib.image as mpimg from sklearn.metrics import roc_curve, auc, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, roc_auc_score, roc_curve, confusion_matrix import seaborn as sns EPOCHS = 10 IMG_WIDTH = 30 IMG_HEIGHT = 30 NUM_CATEGORIES = 43 TEST_SIZE = 0.4 data_dir = '/kaggle/input/gstrb-dataset/gtsrb' IMAGES = [] LABELS = [] for folder in os.listdir(data_dir): folder_path = os.path.join(data_dir, folder) if os.path.isdir(folder_path): for image_file in os.listdir(folder_path): image = cv2.imread(os.path.join(folder_path, image_file), cv2.IMREAD_ANYCOLOR) image = cv2.resize(image, (IMG_WIDTH, IMG_HEIGHT), interpolation=cv2.INTER_AREA) IMAGES.append(image) LABELS.append(int(folder)) image = mpimg.imread('/kaggle/input/gstrb-dataset/gtsrb/0/00000_00004.ppm') plt.axis('off') def get_model(): model = tf.keras.models.Sequential([tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)), tf.keras.layers.MaxPooling2D(pool_size=(3, 3)), tf.keras.layers.Conv2D(64, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dropout(0.33), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.33), tf.keras.layers.Dense(NUM_CATEGORIES, activation='softmax')]) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) return model images, labels = (IMAGES, LABELS) labels = tf.keras.utils.to_categorical(labels) x_train, x_test, y_train, y_test = train_test_split(np.array(images), np.array(labels), test_size=TEST_SIZE) model = get_model() fitting = model.fit(x_train, y_train, epochs=EPOCHS, validation_data=(x_test, y_test)) model_evaluation = model.evaluate(x_test, y_test, verbose=2) filename = 'trained model.h5' model.save(filename) def view(history): pass view(fitting) def way2(history): pass way2(fitting) y_pred_proba = model.predict(x_test) y_true = np.argmax(y_test, axis=1) classification_rep = classification_report(y_true, np.argmax(y_pred_proba, axis=1)) print(classification_rep) auc_scores = [] for class_index in range(NUM_CATEGORIES): class_true = np.where(y_true == class_index, 1, 0) class_pred = y_pred_proba[:, class_index] auc_scores.append(roc_auc_score(class_true, class_pred)) for class_index, auc_score in enumerate(auc_scores): print('AUC Score (Class {}): {}'.format(class_index, auc_score)) plt.figure() plt.plot([0, 1], [0, 1], 'k--') for class_index in range(NUM_CATEGORIES): class_true = np.where(y_true == class_index, 1, 0) class_pred = y_pred_proba[:, class_index] fpr, tpr, _ = roc_curve(class_true, class_pred) auc_score = auc_scores[class_index] plt.plot(fpr, tpr, label='Class {} (AUC = {:.2f})'.format(class_index, auc_score)) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver Operating Characteristic (ROC) Curve') plt.legend(loc='lower right') plt.show()
code
130000822/cell_3
[ "image_output_1.png" ]
import cv2 import matplotlib.image as mpimg import matplotlib.pyplot as plt import os import cv2 import numpy as np import os import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt import matplotlib.image as mpimg from sklearn.metrics import roc_curve, auc, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, roc_auc_score, roc_curve, confusion_matrix import seaborn as sns EPOCHS = 10 IMG_WIDTH = 30 IMG_HEIGHT = 30 NUM_CATEGORIES = 43 TEST_SIZE = 0.4 data_dir = '/kaggle/input/gstrb-dataset/gtsrb' IMAGES = [] LABELS = [] for folder in os.listdir(data_dir): folder_path = os.path.join(data_dir, folder) if os.path.isdir(folder_path): for image_file in os.listdir(folder_path): image = cv2.imread(os.path.join(folder_path, image_file), cv2.IMREAD_ANYCOLOR) image = cv2.resize(image, (IMG_WIDTH, IMG_HEIGHT), interpolation=cv2.INTER_AREA) IMAGES.append(image) LABELS.append(int(folder)) image = mpimg.imread('/kaggle/input/gstrb-dataset/gtsrb/0/00000_00004.ppm') plt.imshow(image) plt.axis('off') plt.show()
code
130000822/cell_5
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split import cv2 import numpy as np import os import tensorflow as tf import cv2 import numpy as np import os import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt import matplotlib.image as mpimg from sklearn.metrics import roc_curve, auc, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, roc_auc_score, roc_curve, confusion_matrix import seaborn as sns EPOCHS = 10 IMG_WIDTH = 30 IMG_HEIGHT = 30 NUM_CATEGORIES = 43 TEST_SIZE = 0.4 data_dir = '/kaggle/input/gstrb-dataset/gtsrb' IMAGES = [] LABELS = [] for folder in os.listdir(data_dir): folder_path = os.path.join(data_dir, folder) if os.path.isdir(folder_path): for image_file in os.listdir(folder_path): image = cv2.imread(os.path.join(folder_path, image_file), cv2.IMREAD_ANYCOLOR) image = cv2.resize(image, (IMG_WIDTH, IMG_HEIGHT), interpolation=cv2.INTER_AREA) IMAGES.append(image) LABELS.append(int(folder)) def get_model(): model = tf.keras.models.Sequential([tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)), tf.keras.layers.MaxPooling2D(pool_size=(3, 3)), tf.keras.layers.Conv2D(64, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dropout(0.33), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.33), tf.keras.layers.Dense(NUM_CATEGORIES, activation='softmax')]) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) return model images, labels = (IMAGES, LABELS) labels = tf.keras.utils.to_categorical(labels) x_train, x_test, y_train, y_test = train_test_split(np.array(images), np.array(labels), test_size=TEST_SIZE) model = get_model() fitting = model.fit(x_train, y_train, epochs=EPOCHS, validation_data=(x_test, y_test)) model_evaluation = model.evaluate(x_test, y_test, verbose=2) filename = 'trained model.h5' model.save(filename) print(f'Model saved to {filename}.')
code
17145266/cell_25
[ "text_plain_output_1.png" ]
(df_train.shape, df_valid.shape) path = Path('../input/') path.ls() src = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1)) bs = 48 src_lm = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1)) data_lm = src_lm.label_for_lm().databunch(bs=bs) data_lm.vocab.itos[:20] data_lm.train_ds[0][0].data[:10]
code
17145266/cell_4
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd df = pd.read_json('../input/Sarcasm_Headlines_Dataset_v2.json', lines=True) df.head()
code
17145266/cell_23
[ "text_html_output_1.png" ]
(df_train.shape, df_valid.shape) path = Path('../input/') path.ls() src = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1)) bs = 48 src_lm = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1)) data_lm = src_lm.label_for_lm().databunch(bs=bs) data_lm.show_batch()
code
17145266/cell_30
[ "text_plain_output_1.png" ]
(df_train.shape, df_valid.shape) path = Path('../input/') path.ls() src = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1)) bs = 48 src_lm = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1)) data_lm = src_lm.label_for_lm().databunch(bs=bs) data_lm.vocab.itos[:20] data_lm.train_ds[0][0].data[:10] learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3, model_dir='/temp/model/') learn.lr_find() learn.fit_one_cycle(4, 0.05, moms=(0.8, 0.7)) learn.save('fit_head') learn.load('fit_head') learn.unfreeze() learn.lr_find() learn.recorder.plot(suggestion=True)
code
17145266/cell_6
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd df = pd.read_json('../input/Sarcasm_Headlines_Dataset_v2.json', lines=True) df.shape df['headline'][0]
code
17145266/cell_29
[ "text_plain_output_1.png" ]
(df_train.shape, df_valid.shape) path = Path('../input/') path.ls() src = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1)) bs = 48 src_lm = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1)) data_lm = src_lm.label_for_lm().databunch(bs=bs) data_lm.vocab.itos[:20] data_lm.train_ds[0][0].data[:10] learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3, model_dir='/temp/model/') learn.lr_find() learn.fit_one_cycle(4, 0.05, moms=(0.8, 0.7)) learn.save('fit_head') learn.load('fit_head')
code
17145266/cell_39
[ "text_plain_output_1.png", "image_output_1.png" ]
(df_train.shape, df_valid.shape) path = Path('../input/') path.ls() src = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1)) bs = 48 src_lm = ItemLists(path, TextList.from_df(df_train, path='.', cols=1), TextList.from_df(df_valid, path='.', cols=1)) data_lm = src_lm.label_for_lm().databunch(bs=bs) data_lm.vocab.itos[:20] data_lm.train_ds[0][0].data[:10] src_clas = ItemLists(path, TextList.from_df(df_train, path='.', cols=1, vocab=data_lm.vocab), TextList.from_df(df_valid, path='.', cols=1, vocab=data_lm.vocab)) data_clas = src_clas.label_from_df(cols=2).databunch(bs=bs) data_clas.show_batch()
code
17145266/cell_2
[ "text_plain_output_1.png" ]
!pip install pretrainedmodels !pip install fastai==1.0.52 import fastai from fastai import * from fastai.vision import * from fastai.text import * from torchvision.models import * import pretrainedmodels from utils import * import sys from fastai.callbacks.tracker import EarlyStoppingCallback from fastai.callbacks.tracker import SaveModelCallback
code
17145266/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os print(os.listdir('../input'))
code