Dataset schema:
path: string (length 13 to 17)
screenshot_names: sequence (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 class)
32062359/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from cord import ResearchPapers
research_papers = ResearchPapers.load()
code
32062359/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from cord import ResearchPapers
research_papers = ResearchPapers.load()
help(research_papers.search)
code
32062359/cell_17
[ "text_plain_output_1.png" ]
from langdetect import detect
from nltk.tokenize import sent_tokenize,word_tokenize
from tqdm import tqdm
import pandas as pd
import pandas as pd
keywordlist = ['inhibitor']
def loopsearch(keywordlist, researchpaperfu):
    alldataframeco = pd.DataFrame()
    alldataframenoco = pd.DataFrame()
    allcopid = []
    allnocopid = []
    for i in tqdm(keywordlist):
        covinf = researchpaperfu.covid_related().search(i, num_results=1000, covid_related=False, view='table').results[['cord_uid', 'title', 'abstract']]
        notcovinf = researchpaperfu.not_covid_related().search(i, num_results=10000, covid_related=False, view='table').results[['cord_uid', 'title', 'abstract']]
        covinfpid = list(covinf.cord_uid.values)
        notcovinfpid = list(notcovinf.cord_uid.values)
        alldataframeco = pd.concat([covinf, alldataframeco])
        alldataframenoco = pd.concat([notcovinf, alldataframenoco])
        allcopid.append(covinfpid)
        allnocopid.append(notcovinfpid)
    alldataframeco = alldataframeco.drop_duplicates()
    alldataframenoco = alldataframenoco.drop_duplicates()
    return (allcopid, allnocopid, alldataframeco, alldataframenoco)
fullab = pd.concat([allcoab, allnocoab])
fullab = fullab.rename(columns={'cord_uid': 'pid'})
fullab = fullab[fullab.abstract != '']
lan = []
for i in fullab.abstract:
    lan1 = detect(i)
    lan.append(lan1)
fullab['lan'] = lan
fullab = fullab[fullab.lan == 'en']
fullab = fullab[['pid', 'title', 'abstract']]
question = 'Q1'
question_dir = question + '/'
keylist = pd.read_csv('/kaggle/input/kagglecovid19literature/results/' + question_dir + 'keylist.txt').columns.values
valuelist = pd.read_csv('/kaggle/input/kagglecovid19literature/results/' + question_dir + 'valuelist.txt', header=None).values
viruslist = pd.read_csv('/kaggle/input/kagglecovid19literature/results/' + question_dir + 'viruslist.txt', header=None).values
def build_raw_data(file):
    def retunsb(sentlist, i, lennu):
        sent = sentlist[i]
        if i - 1 < 0:
            present = ''
        else:
            present = sentlist[i - 1]
        if i + 1 >= lennu:
            aftsent = ''
        else:
            aftsent = sentlist[i + 1]
        tempsent = ''
        tempsent = tempsent.join([present, sent, aftsent])
        return tempsent
    allfile = file
    allfile['abstract'] = allfile.abstract.astype(str)
    allsent = []
    allid = []
    allab = []
    for i in tqdm(range(len(allfile))):
        temp = allfile.abstract.iloc[i]
        temp = sent_tokenize(temp)
        for j in range(len(temp)):
            tempab = retunsb(temp, j, len(temp))
            allsent.append(temp[j])
            allid.append(allfile.pid.iloc[i])
            allab.append(tempab)
    allsent = pd.DataFrame(allsent, columns=['sent'])
    allsent['pid'] = allid
    allsent['abstract'] = allab
    return (allfile, allsent)
allfile, allsent = build_raw_data(fullab)
code
32062359/cell_10
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from cord import ResearchPapers
from tqdm import tqdm
import pandas as pd
import pandas as pd
keywordlist = ['inhibitor']
research_papers = ResearchPapers.load()
help(research_papers.search)
def loopsearch(keywordlist, researchpaperfu):
    alldataframeco = pd.DataFrame()
    alldataframenoco = pd.DataFrame()
    allcopid = []
    allnocopid = []
    for i in tqdm(keywordlist):
        covinf = researchpaperfu.covid_related().search(i, num_results=1000, covid_related=False, view='table').results[['cord_uid', 'title', 'abstract']]
        notcovinf = researchpaperfu.not_covid_related().search(i, num_results=10000, covid_related=False, view='table').results[['cord_uid', 'title', 'abstract']]
        covinfpid = list(covinf.cord_uid.values)
        notcovinfpid = list(notcovinf.cord_uid.values)
        alldataframeco = pd.concat([covinf, alldataframeco])
        alldataframenoco = pd.concat([notcovinf, alldataframenoco])
        allcopid.append(covinfpid)
        allnocopid.append(notcovinfpid)
    alldataframeco = alldataframeco.drop_duplicates()
    alldataframenoco = alldataframenoco.drop_duplicates()
    return (allcopid, allnocopid, alldataframeco, alldataframenoco)
keywordlist = ['inhibitor']
_, _, allcoab, allnocoab = loopsearch(keywordlist, research_papers)
code
2019285/cell_9
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
df = pd.read_csv('../input/kc_house_data.csv')
#df correlation matrix
f,ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)
data.plot.scatter(x=var, y='price', ylim=(0, 8000000))
code
2019285/cell_6
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/kc_house_data.csv')
df.describe()
code
2019285/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
df = pd.read_csv('../input/kc_house_data.csv')
#df correlation matrix
f,ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)
#scatter plot 'grade'/'price'
var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y="price", data=data)
fig.axis(ymin=0, ymax=8000000);
var2 = 'sqft_living15'
data = pd.concat([df['price'], df[var2]], axis=1)
data.plot.scatter(x=var2, y='price', ylim=(0, 8000000))
code
2019285/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/kc_house_data.csv')
df.info()
code
2019285/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
df = pd.read_csv('../input/kc_house_data.csv')
f, ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
code
2019285/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
import statsmodels.api as sm
df = pd.read_csv('../input/kc_house_data.csv')
#df correlation matrix
f,ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)
#scatter plot 'grade'/'price'
var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y="price", data=data)
fig.axis(ymin=0, ymax=8000000);
# Correlation matrix updated (X)
X = df[['bedrooms','floors','condition','grade','sqft_basement','yr_built','yr_renovated','lat','long','sqft_lot15']]
f,ax = plt.subplots(figsize=(18, 18))
sb.heatmap(X.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
X = df[['bedrooms', 'floors', 'condition', 'grade', 'sqft_basement', 'yr_built', 'yr_renovated', 'lat', 'long', 'sqft_lot15']]
y = df['price']
est = sm.OLS(y, X).fit()
est.summary()
code
2019285/cell_3
[ "image_output_1.png" ]
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
import statsmodels.api as sm
from sklearn.preprocessing import StandardScaler
scale = StandardScaler()
code
2019285/cell_14
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
df = pd.read_csv('../input/kc_house_data.csv')
#df correlation matrix
f,ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)
#scatter plot 'grade'/'price'
var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y="price", data=data)
fig.axis(ymin=0, ymax=8000000);
# Correlation matrix updated (X)
X = df[['bedrooms','floors','condition','grade','sqft_basement','yr_built','yr_renovated','lat','long','sqft_lot15']]
f,ax = plt.subplots(figsize=(18, 18))
sb.heatmap(X.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
X = df[['bedrooms', 'floors', 'condition', 'grade', 'sqft_basement', 'yr_built', 'yr_renovated', 'lat', 'long', 'sqft_lot15']]
y = df['price']
LinReg = LinearRegression(normalize=True)
LinReg.fit(X, y)
print(LinReg.score(X, y))
code
2019285/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
df = pd.read_csv('../input/kc_house_data.csv')
#df correlation matrix
f,ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)
var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y='price', data=data)
fig.axis(ymin=0, ymax=8000000)
code
2019285/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
df = pd.read_csv('../input/kc_house_data.csv')
#df correlation matrix
f,ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)
#scatter plot 'grade'/'price'
var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y="price", data=data)
fig.axis(ymin=0, ymax=8000000);
X = df[['bedrooms', 'floors', 'condition', 'grade', 'sqft_basement', 'yr_built', 'yr_renovated', 'lat', 'long', 'sqft_lot15']]
f, ax = plt.subplots(figsize=(18, 18))
sb.heatmap(X.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
code
2019285/cell_5
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/kc_house_data.csv')
df.head()
code
2005289/cell_13
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
anime_df = pd.read_csv('../input/anime.csv', header=0)
rating_df = pd.read_csv('../input/rating.csv', header=0)
anime_df.shape
anime_df.dtypes
anime_df.isnull().sum()
anime_df = anime_df.replace('Unknown', np.nan)
anime_df_nnull = anime_df.dropna()
anime_df_nnull.type.unique()
type_count_series = anime_df_nnull['type'].value_counts()
type_df = type_count_series.to_frame()
type_df
type_df = type_df.reset_index()
type_df.columns = ['type', 'counts']
type_df
type_members_series = anime_df_nnull.groupby('type')['members'].agg('sum').reset_index()
type_members_df = pd.DataFrame(data=type_members_series)
type_members_df = type_members_df.sort_values('members')
anime_df_nnull['episodes'] = anime_df_nnull['episodes'].astype(int)
sns.pairplot(anime_df_nnull[['type', 'members', 'episodes', 'rating']], hue='type')
code
2005289/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
anime_df = pd.read_csv('../input/anime.csv', header=0)
rating_df = pd.read_csv('../input/rating.csv', header=0)
anime_df.shape
anime_df.dtypes
anime_df.isnull().sum()
anime_df = anime_df.replace('Unknown', np.nan)
anime_df_nnull = anime_df.dropna()
anime_df_nnull.type.unique()
code
2005289/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
anime_df = pd.read_csv('../input/anime.csv', header=0)
rating_df = pd.read_csv('../input/rating.csv', header=0)
rating_df.shape
code
2005289/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
anime_df = pd.read_csv('../input/anime.csv', header=0)
rating_df = pd.read_csv('../input/rating.csv', header=0)
anime_df.shape
anime_df.dtypes
code
2005289/cell_11
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
anime_df = pd.read_csv('../input/anime.csv', header=0)
rating_df = pd.read_csv('../input/rating.csv', header=0)
anime_df.shape
anime_df.dtypes
anime_df.isnull().sum()
anime_df = anime_df.replace('Unknown', np.nan)
anime_df_nnull = anime_df.dropna()
anime_df_nnull.type.unique()
type_count_series = anime_df_nnull['type'].value_counts()
type_df = type_count_series.to_frame()
type_df
type_df = type_df.reset_index()
type_df.columns = ['type', 'counts']
type_df
sns.barplot(y=type_df['counts'], x=type_df['type'])
code
2005289/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
anime_df = pd.read_csv('../input/anime.csv', header=0)
rating_df = pd.read_csv('../input/rating.csv', header=0)
anime_df.shape
anime_df.dtypes
anime_df.isnull().sum()
code
2005289/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
anime_df = pd.read_csv('../input/anime.csv', header=0)
rating_df = pd.read_csv('../input/rating.csv', header=0)
anime_df.shape
anime_df.dtypes
anime_df.isnull().sum()
anime_df = anime_df.replace('Unknown', np.nan)
anime_df_nnull = anime_df.dropna()
anime_df_nnull.head()
code
2005289/cell_15
[ "text_html_output_1.png" ]
import collections
import itertools
import numpy as np
import pandas as pd
import seaborn as sns
anime_df = pd.read_csv('../input/anime.csv', header=0)
rating_df = pd.read_csv('../input/rating.csv', header=0)
anime_df.shape
anime_df.dtypes
anime_df.isnull().sum()
anime_df = anime_df.replace('Unknown', np.nan)
anime_df_nnull = anime_df.dropna()
anime_df_nnull.type.unique()
type_count_series = anime_df_nnull['type'].value_counts()
type_df = type_count_series.to_frame()
type_df
type_df = type_df.reset_index()
type_df.columns = ['type', 'counts']
type_df
type_members_series = anime_df_nnull.groupby('type')['members'].agg('sum').reset_index()
type_members_df = pd.DataFrame(data=type_members_series)
type_members_df = type_members_df.sort_values('members')
anime_df_nnull['episodes'] = anime_df_nnull['episodes'].astype(int)
genre_values_list = anime_df_nnull['genre'].apply(lambda x: x.split(', ')).values.tolist()
genre_value_chain = itertools.chain(*genre_values_list)
genre_counter = collections.Counter(genre_value_chain)
genre_df = pd.DataFrame.from_dict(genre_counter, orient='index').reset_index()
genre_df.columns = ['genre', 'count']
genre_df = genre_df.sort_values('count', ascending=False)
sns.barplot(x=genre_df['count'], y=genre_df['genre'])
code
2005289/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
anime_df = pd.read_csv('../input/anime.csv', header=0)
rating_df = pd.read_csv('../input/rating.csv', header=0)
anime_df.shape
code
2005289/cell_14
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
anime_df = pd.read_csv('../input/anime.csv', header=0)
rating_df = pd.read_csv('../input/rating.csv', header=0)
anime_df.shape
anime_df.dtypes
anime_df.isnull().sum()
anime_df = anime_df.replace('Unknown', np.nan)
anime_df_nnull = anime_df.dropna()
anime_df_nnull.type.unique()
type_count_series = anime_df_nnull['type'].value_counts()
type_df = type_count_series.to_frame()
type_df
type_df = type_df.reset_index()
type_df.columns = ['type', 'counts']
type_df
type_members_series = anime_df_nnull.groupby('type')['members'].agg('sum').reset_index()
type_members_df = pd.DataFrame(data=type_members_series)
type_members_df = type_members_df.sort_values('members')
anime_df_nnull['episodes'] = anime_df_nnull['episodes'].astype(int)
sns.boxplot(data=anime_df_nnull, x='type', y='rating')
code
2005289/cell_10
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
anime_df = pd.read_csv('../input/anime.csv', header=0)
rating_df = pd.read_csv('../input/rating.csv', header=0)
anime_df.shape
anime_df.dtypes
anime_df.isnull().sum()
anime_df = anime_df.replace('Unknown', np.nan)
anime_df_nnull = anime_df.dropna()
anime_df_nnull.type.unique()
type_count_series = anime_df_nnull['type'].value_counts()
type_df = type_count_series.to_frame()
type_df
code
2005289/cell_12
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
anime_df = pd.read_csv('../input/anime.csv', header=0)
rating_df = pd.read_csv('../input/rating.csv', header=0)
anime_df.shape
anime_df.dtypes
anime_df.isnull().sum()
anime_df = anime_df.replace('Unknown', np.nan)
anime_df_nnull = anime_df.dropna()
anime_df_nnull.type.unique()
type_count_series = anime_df_nnull['type'].value_counts()
type_df = type_count_series.to_frame()
type_df
type_df = type_df.reset_index()
type_df.columns = ['type', 'counts']
type_df
type_members_series = anime_df_nnull.groupby('type')['members'].agg('sum').reset_index()
type_members_df = pd.DataFrame(data=type_members_series)
type_members_df = type_members_df.sort_values('members')
sns.barplot(y=type_members_df['members'], x=type_members_df['type'])
code
2005289/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
anime_df = pd.read_csv('../input/anime.csv', header=0)
rating_df = pd.read_csv('../input/rating.csv', header=0)
anime_df.shape
anime_df.head()
code
2036880/cell_4
[ "text_plain_output_1.png" ]
from subprocess import check_output
import IPython
import matplotlib #collection of functions for scientific and publication-ready visualization
import numpy as np #foundational package for scientific computing
import pandas as pd #collection of functions for data processing and analysis modeled after R dataframes with SQL like features
import scipy as sp #collection of functions for scientific computing and advance mathematics
import sklearn #collection of machine learning algorithms
import sys #access to system parameters https://docs.python.org/3/library/sys.html
import warnings
import sys
import pandas as pd
import matplotlib
import numpy as np
import scipy as sp
import IPython
from IPython import display
import sklearn
import timeit
import random
import time
import warnings
warnings.filterwarnings('ignore')
from subprocess import check_output
data_train = pd.read_csv('../input/train.csv')
data_test = pd.read_csv('../input/test.csv')
print('data_train.shape: {}'.format(data_train.shape))
print('data_test.shape: {}'.format(data_test.shape))
code
2036880/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import IPython
import matplotlib #collection of functions for scientific and publication-ready visualization
import numpy as np #foundational package for scientific computing
import pandas as pd #collection of functions for data processing and analysis modeled after R dataframes with SQL like features
import scipy as sp #collection of functions for scientific computing and advance mathematics
import sklearn #collection of machine learning algorithms
import sys #access to system parameters https://docs.python.org/3/library/sys.html
import warnings
import sys
print('Python version: {}'.format(sys.version))
import pandas as pd
print('pandas version: {}'.format(pd.__version__))
import matplotlib
print('matplotlib version: {}'.format(matplotlib.__version__))
import numpy as np
print('NumPy version: {}'.format(np.__version__))
import scipy as sp
print('SciPy version: {}'.format(sp.__version__))
import IPython
from IPython import display
print('IPython version: {}'.format(IPython.__version__))
import sklearn
print('scikit-learn version: {}'.format(sklearn.__version__))
import timeit
import random
import time
import warnings
warnings.filterwarnings('ignore')
print('-' * 25)
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
2036880/cell_18
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import Imputer
from subprocess import check_output
import IPython
import matplotlib #collection of functions for scientific and publication-ready visualization
import numpy as np #foundational package for scientific computing
import pandas as pd #collection of functions for data processing and analysis modeled after R dataframes with SQL like features
import scipy as sp #collection of functions for scientific computing and advance mathematics
import sklearn #collection of machine learning algorithms
import sys #access to system parameters https://docs.python.org/3/library/sys.html
import warnings
import sys
import pandas as pd
import matplotlib
import numpy as np
import scipy as sp
import IPython
from IPython import display
import sklearn
import timeit
import random
import time
import warnings
warnings.filterwarnings('ignore')
from subprocess import check_output
data_train = pd.read_csv('../input/train.csv')
data_test = pd.read_csv('../input/test.csv')
X = data_train.copy(deep=True)
X_test = data_test.copy(deep=True)
X.drop(['SalePrice'], axis=1, inplace=True)
columns_too_less = ['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscVal', 'MiscFeature']
X.drop(columns_too_less, axis=1, inplace=True)
X_test.drop(columns_too_less, axis=1, inplace=True)
columns_drop_std = X.std()[X.std() < 3].index
X.drop(columns_drop_std, axis=1, inplace=True)
X_test.drop(columns_drop_std, axis=1, inplace=True)
X = X.select_dtypes(exclude=['object'])
X_test = X_test.select_dtypes(exclude=['object'])
columns_drop_corr = X.corrwith(data_train['SalePrice'])[X.corrwith(data_train['SalePrice']) < 0.2].index
X.drop(columns_drop_corr, axis=1, inplace=True)
X_test.drop(columns_drop_corr, axis=1, inplace=True)
columns = X.columns
from sklearn.preprocessing import Imputer
imputer = Imputer()
X_numerical = imputer.fit_transform(X)
X_test_numerical = imputer.fit_transform(X_test)
X_numerical = pd.DataFrame(X_numerical, columns=columns)
X_test_numerical = pd.DataFrame(X_test_numerical, columns=columns)
print('X_numerical.shape: {}'.format(X_numerical.shape))
print('X_test_numerical.shape: {}'.format(X_test_numerical.shape))
code
2036880/cell_16
[ "image_output_1.png" ]
from subprocess import check_output
import IPython
import matplotlib #collection of functions for scientific and publication-ready visualization
import numpy as np #foundational package for scientific computing
import pandas as pd #collection of functions for data processing and analysis modeled after R dataframes with SQL like features
import scipy as sp #collection of functions for scientific computing and advance mathematics
import sklearn #collection of machine learning algorithms
import sys #access to system parameters https://docs.python.org/3/library/sys.html
import warnings
import sys
import pandas as pd
import matplotlib
import numpy as np
import scipy as sp
import IPython
from IPython import display
import sklearn
import timeit
import random
import time
import warnings
warnings.filterwarnings('ignore')
from subprocess import check_output
data_train = pd.read_csv('../input/train.csv')
data_test = pd.read_csv('../input/test.csv')
X = data_train.copy(deep=True)
X_test = data_test.copy(deep=True)
X.drop(['SalePrice'], axis=1, inplace=True)
columns_too_less = ['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscVal', 'MiscFeature']
X.drop(columns_too_less, axis=1, inplace=True)
X_test.drop(columns_too_less, axis=1, inplace=True)
columns_drop_std = X.std()[X.std() < 3].index
X.drop(columns_drop_std, axis=1, inplace=True)
X_test.drop(columns_drop_std, axis=1, inplace=True)
X = X.select_dtypes(exclude=['object'])
X_test = X_test.select_dtypes(exclude=['object'])
columns_drop_corr = X.corrwith(data_train['SalePrice'])[X.corrwith(data_train['SalePrice']) < 0.2].index
X.drop(columns_drop_corr, axis=1, inplace=True)
X_test.drop(columns_drop_corr, axis=1, inplace=True)
columns = X.columns
columns
code
2036880/cell_10
[ "text_plain_output_1.png" ]
from subprocess import check_output
import IPython
import matplotlib #collection of functions for scientific and publication-ready visualization
import matplotlib.pyplot as plt
import numpy as np #foundational package for scientific computing
import pandas as pd #collection of functions for data processing and analysis modeled after R dataframes with SQL like features
import scipy as sp #collection of functions for scientific computing and advance mathematics
import seaborn as sns
import sklearn #collection of machine learning algorithms
import sys #access to system parameters https://docs.python.org/3/library/sys.html
import warnings
import sys
import pandas as pd
import matplotlib
import numpy as np
import scipy as sp
import IPython
from IPython import display
import sklearn
import timeit
import random
import time
import warnings
warnings.filterwarnings('ignore')
from subprocess import check_output
data_train = pd.read_csv('../input/train.csv')
data_test = pd.read_csv('../input/test.csv')
X = data_train.copy(deep=True)
X_test = data_test.copy(deep=True)
X.drop(['SalePrice'], axis=1, inplace=True)
columns_too_less = ['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscVal', 'MiscFeature']
X.drop(columns_too_less, axis=1, inplace=True)
X_test.drop(columns_too_less, axis=1, inplace=True)
columns_drop_std = X.std()[X.std() < 3].index
X.drop(columns_drop_std, axis=1, inplace=True)
X_test.drop(columns_drop_std, axis=1, inplace=True)
def correlation_heatmap(df):
    _, ax = plt.subplots(figsize=(24, 20))
    colormap = sns.diverging_palette(220, 10, as_cmap=True)
    _ = sns.heatmap(df.corr(), cmap=colormap, square=True, cbar_kws={'shrink': 0.9}, ax=ax, annot=True, linewidths=0.1, vmax=1.0, vmin=-1.0, linecolor='white', annot_kws={'fontsize': 12})
    plt.title('Pearson Correlation of Features', y=1.05, size=15)
correlation_heatmap(X)
code
74042371/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns
features = list(train.columns)
list(enumerate(features))
code
74042371/cell_25
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns
features = list(train.columns)
list(enumerate(features))
train.isnull().sum()
train.isnull().sum().plot(kind='bar', figsize=(25, 15))
code
74042371/cell_23
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns
features = list(train.columns)
list(enumerate(features))
print('missing values count in train: ', train.isnull().sum().sum())
print('missing values count in test: ', test.isnull().sum().sum())
code
74042371/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns
features = list(train.columns)
list(enumerate(features))
plt.figure(figsize=(20, 150))
for i in enumerate(features):
    plt.subplot(20, 6, i[0] + 1)
    plt.hist(i[1], bins=50)
code
74042371/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
display(train.head())
display(test.head())
display(sub.head())
code
74042371/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns
features = list(train.columns)
list(enumerate(features))
train.isnull().sum()
plt.figure(figsize=(25, 15))
sns.boxplot(data=train)
code
74042371/cell_26
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns
features = list(train.columns)
list(enumerate(features))
test.isnull().sum()
code
74042371/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns
features = list(train.columns)
list(enumerate(features))
plt.figure(figsize=(25, 20))
sns.heatmap(train.corr(), annot=True, fmt='.1f', linewidth=1)
code
74042371/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
74042371/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
print('size of train: ', train.shape)
print('size of test: ', test.shape)
print('size of submission: ', sub.shape)
code
74042371/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns
features = list(train.columns)
list(enumerate(features))
plt.figure(figsize=(7, 6))
sns.distplot(train['claim'])
code
74042371/cell_8
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
print(train.info())
print(test.info())
code
74042371/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns
features = list(train.columns)
list(enumerate(features))
train['claim'].value_counts()
code
74042371/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns
features = list(train.columns)
list(enumerate(features))
train['claim'].value_counts().plot(kind='bar')
code
74042371/cell_24
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns
features = list(train.columns)
list(enumerate(features))
train.isnull().sum()
code
74042371/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns
features = list(train.columns)
list(enumerate(features))
train.describe()
code
74042371/cell_27
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns
features = list(train.columns)
list(enumerate(features))
test.isnull().sum()
test.isnull().sum().plot(kind='bar', figsize=(25, 15))
code
74042371/cell_12
[ "text_html_output_2.png", "text_html_output_1.png", "text_html_output_3.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns
code
1009060/cell_4
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from subprocess import check_output
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
from subprocess import check_output
directory = '../input/'
train = pd.read_csv(directory + 'train.csv')
test = pd.read_csv(directory + 'test.csv')
numeric_feats = [x for x in train.columns[1:-1] if 'cont' in x]
categorical_feats = [x for x in train.columns[1:-1] if 'cat' in x]
catwithdummies = pd.get_dummies(train)
catwithdummies = pd.get_dummies(categorical_feats)
index = list(train.index)
print(index[0:10])
np.random.shuffle(index)
print(index[0:10])
train = train.iloc[index]
'train = train.iloc[np.random.permutation(len(train))]'
test['loss'] = np.nan
y = np.log(train['loss'].values + 200)
id_train = train['id'].values
id_test = test['id'].values
ntrain = train.shape[0]
tr_te = pd.concat((train, test), axis=0)
sparse_data = []
f_cat = [f for f in tr_te.columns if 'cat' in f]
for f in f_cat:
    dummy = pd.get_dummies(tr_te[f].astype('category'))
    tmp = csr_matrix(dummy)
    sparse_data.append(tmp)
f_num = [f for f in tr_te.columns if 'cont' in f]
scaler = StandardScaler()
tmp = csr_matrix(scaler.fit_transform(tr_te[f_num]))
sparse_data.append(tmp)
del (tr_te, train, test)
xtr_te = hstack(sparse_data, format='csr')
xtrain = xtr_te[:ntrain, :]
xtest = xtr_te[ntrain:, :]
print('Dim train', xtrain.shape)
print('Dim test', xtest.shape)
code
1009060/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
from subprocess import check_output
directory = '../input/'
train = pd.read_csv(directory + 'train.csv')
test = pd.read_csv(directory + 'test.csv')
numeric_feats = [x for x in train.columns[1:-1] if 'cont' in x]
categorical_feats = [x for x in train.columns[1:-1] if 'cat' in x]
catwithdummies = pd.get_dummies(train)
catwithdummies = pd.get_dummies(categorical_feats)
print(categorical_feats)
code
1009060/cell_1
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from subprocess import check_output
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
directory = '../input/'
train = pd.read_csv(directory + 'train.csv')
test = pd.read_csv(directory + 'test.csv')
numeric_feats = [x for x in train.columns[1:-1] if 'cont' in x]
categorical_feats = [x for x in train.columns[1:-1] if 'cat' in x]
catwithdummies = pd.get_dummies(train)
print.head(catwithdummies)
code
1009060/cell_3
[ "text_plain_output_1.png" ]
from subprocess import check_output
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
from subprocess import check_output
directory = '../input/'
train = pd.read_csv(directory + 'train.csv')
test = pd.read_csv(directory + 'test.csv')
numeric_feats = [x for x in train.columns[1:-1] if 'cont' in x]
categorical_feats = [x for x in train.columns[1:-1] if 'cat' in x]
catwithdummies = pd.get_dummies(train)
catwithdummies = pd.get_dummies(categorical_feats)
print(catwithdummies.shape)
code
130021146/cell_9
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import os
import splitfolders
os.makedirs('output')
os.makedirs('output/train')
os.makedirs('output/val')
os.makedirs('output/test')
loc = '/kaggle/input/skin-diseases-image-dataset/IMG_CLASSES'
splitfolders.ratio(loc, output='output', ratio=(0.8, 0.1, 0.1))
code
130021146/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.image as mping
import matplotlib.pyplot as plt
import os
import random
import splitfolders
os.makedirs('output')
os.makedirs('output/train')
os.makedirs('output/val')
os.makedirs('output/test')
loc = '/kaggle/input/skin-diseases-image-dataset/IMG_CLASSES'
splitfolders.ratio(loc, output='output', ratio=(0.8, 0.1, 0.1))
def random_image(val_dir, val_class):
    folder = val_dir + val_class
    random_image = random.sample(os.listdir(folder), 1)
    img = mping.imread(folder + '/' + random_image[0])
    return img
fig = plt.figure(figsize=(10, 7))
fig.add_subplot(2, 2, 1)
img_1 = random_image(val_dir='./output/val/', val_class='2. Melanoma 15.75k')
fig.add_subplot(2, 2, 2)
img_2 = random_image(val_dir='./output/val/', val_class='4. Basal Cell Carcinoma (BCC) 3323')
fig.add_subplot(2, 2, 3)
img_3 = random_image(val_dir='./output/val/', val_class='5. Melanocytic Nevi (NV) - 7970')
fig.add_subplot(2, 2, 4)
img4 = random_image(val_dir='./output/val/', val_class='1. Eczema 1677')
code
130021146/cell_19
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Input , Dense , Flatten , GlobalAveragePooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing import image_dataset_from_directory
import matplotlib.image as mping
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import os
import random
import random
import splitfolders
import tensorflow as tf
os.makedirs('output')
os.makedirs('output/train')
os.makedirs('output/val')
os.makedirs('output/test')
loc = '/kaggle/input/skin-diseases-image-dataset/IMG_CLASSES'
splitfolders.ratio(loc, output='output', ratio=(0.8, 0.1, 0.1))
def random_image(val_dir, val_class):
    folder = val_dir + val_class
    random_image = random.sample(os.listdir(folder), 1)
    img = mping.imread(folder + '/' + random_image[0])
    return img
fig = plt.figure(figsize=(10, 7))
#Add an Axes to the figure as part of a subplot arrangement(Three integers (nrows, ncols, index).)
fig.add_subplot(2,2,1)
img_1 = random_image(val_dir = "./output/val/",val_class = "2. Melanoma 15.75k")
fig.add_subplot(2,2,2)
img_2 = random_image(val_dir = "./output/val/",val_class = "4. Basal Cell Carcinoma (BCC) 3323")
fig.add_subplot(2,2,3)
img_3 = random_image(val_dir = "./output/val/",val_class = "5. Melanocytic Nevi (NV) - 7970")
fig.add_subplot(2,2,4)
img4 = random_image(val_dir = "./output/val/",val_class = "1. Eczema 1677")
fig = plt.figure(figsize=(10, 7))
#Add an Axes to the figure as part of a subplot arrangement(Three integers (nrows, ncols, index).)
fig.add_subplot(2,2,1)
img_1 = random_image(val_dir = "./output/val/",val_class = "3. Atopic Dermatitis - 1.25k")
fig.add_subplot(2,2,2)
img_2 = random_image(val_dir = "./output/val/",val_class = "6. Benign Keratosis-like Lesions (BKL) 2624")
fig.add_subplot(2,2,3)
img_3 = random_image(val_dir = "./output/val/",val_class = "7. Psoriasis pictures Lichen Planus and related diseases - 2k")
fig.add_subplot(2,2,4)
img4 = random_image(val_dir = "./output/val/",val_class = "10. Warts Molluscum and other Viral Infections - 2103")
from tensorflow.keras.preprocessing import image_dataset_from_directory
train_dir = './output/train'
test_dir = './output/test'
val_dir = './output/val'
train_data = image_dataset_from_directory(train_dir, label_mode='categorical', image_size=(299, 299), batch_size=32, shuffle=True, seed=42)
test_data = image_dataset_from_directory(test_dir, label_mode='categorical', image_size=(299, 299), batch_size=32, shuffle=False, seed=42)
val_data = image_dataset_from_directory(val_dir, label_mode='categorical', image_size=(299, 299), batch_size=32, shuffle=False, seed=42)
addModel = tf.keras.applications.xception.Xception(input_shape=(299, 299, 3), include_top=False, weights='imagenet')
model = Sequential()
model.add(addModel)
model.add(GlobalAveragePooling2D())
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax', name='classification'))
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005), loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
history_1 = model.fit(train_data, validation_data=val_data, epochs=10)
model.evaluate(val_data)
plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['loss', 'val_loss'], loc='upper right')
plt.show()
plt.figure()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['accuracy', 'val_accuracy'], loc='upper right')
plt.show()
code
130021146/cell_7
[ "text_plain_output_1.png" ]
!pip install split_folders
code
130021146/cell_18
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Input , Dense , Flatten , GlobalAveragePooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing import image_dataset_from_directory
import tensorflow as tf
from tensorflow.keras.preprocessing import image_dataset_from_directory
train_dir = './output/train'
test_dir = './output/test'
val_dir = './output/val'
train_data = image_dataset_from_directory(train_dir, label_mode='categorical', image_size=(299, 299), batch_size=32, shuffle=True, seed=42)
test_data = image_dataset_from_directory(test_dir, label_mode='categorical', image_size=(299, 299), batch_size=32, shuffle=False, seed=42)
val_data = image_dataset_from_directory(val_dir, label_mode='categorical', image_size=(299, 299), batch_size=32, shuffle=False, seed=42)
addModel = tf.keras.applications.xception.Xception(input_shape=(299, 299, 3), include_top=False, weights='imagenet')
model = Sequential()
model.add(addModel)
model.add(GlobalAveragePooling2D())
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax', name='classification'))
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005), loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
history_1 = model.fit(train_data, validation_data=val_data, epochs=10)
code
130021146/cell_15
[ "image_output_1.png" ]
from tensorflow.keras.preprocessing import image_dataset_from_directory
from tensorflow.keras.preprocessing import image_dataset_from_directory
train_dir = './output/train'
test_dir = './output/test'
val_dir = './output/val'
train_data = image_dataset_from_directory(train_dir, label_mode='categorical', image_size=(299, 299), batch_size=32, shuffle=True, seed=42)
test_data = image_dataset_from_directory(test_dir, label_mode='categorical', image_size=(299, 299), batch_size=32, shuffle=False, seed=42)
val_data = image_dataset_from_directory(val_dir, label_mode='categorical', image_size=(299, 299), batch_size=32, shuffle=False, seed=42)
code
130021146/cell_16
[ "image_output_1.png" ]
from tensorflow.keras.layers import Input , Dense , Flatten , GlobalAveragePooling2D
from tensorflow.keras.models import Sequential
import tensorflow as tf
addModel = tf.keras.applications.xception.Xception(input_shape=(299, 299, 3), include_top=False, weights='imagenet')
model = Sequential()
model.add(addModel)
model.add(GlobalAveragePooling2D())
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax', name='classification'))
code
130021146/cell_17
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Input , Dense , Flatten , GlobalAveragePooling2D
from tensorflow.keras.models import Sequential
import tensorflow as tf
addModel = tf.keras.applications.xception.Xception(input_shape=(299, 299, 3), include_top=False, weights='imagenet')
model = Sequential()
model.add(addModel)
model.add(GlobalAveragePooling2D())
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax', name='classification'))
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005), loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
code
130021146/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.image as mping
import matplotlib.pyplot as plt
import os
import random
import splitfolders
os.makedirs('output')
os.makedirs('output/train')
os.makedirs('output/val')
os.makedirs('output/test')
loc = '/kaggle/input/skin-diseases-image-dataset/IMG_CLASSES'
splitfolders.ratio(loc, output='output', ratio=(0.8, 0.1, 0.1))
def random_image(val_dir, val_class):
    folder = val_dir + val_class
    random_image = random.sample(os.listdir(folder), 1)
    img = mping.imread(folder + '/' + random_image[0])
    return img
fig = plt.figure(figsize=(10, 7))
#Add an Axes to the figure as part of a subplot arrangement(Three integers (nrows, ncols, index).)
fig.add_subplot(2,2,1)
img_1 = random_image(val_dir = "./output/val/",val_class = "2. Melanoma 15.75k")
fig.add_subplot(2,2,2)
img_2 = random_image(val_dir = "./output/val/",val_class = "4. Basal Cell Carcinoma (BCC) 3323")
fig.add_subplot(2,2,3)
img_3 = random_image(val_dir = "./output/val/",val_class = "5. Melanocytic Nevi (NV) - 7970")
fig.add_subplot(2,2,4)
img4 = random_image(val_dir = "./output/val/",val_class = "1. Eczema 1677")
fig = plt.figure(figsize=(10, 7))
fig.add_subplot(2, 2, 1)
img_1 = random_image(val_dir='./output/val/', val_class='3. Atopic Dermatitis - 1.25k')
fig.add_subplot(2, 2, 2)
img_2 = random_image(val_dir='./output/val/', val_class='6. Benign Keratosis-like Lesions (BKL) 2624')
fig.add_subplot(2, 2, 3)
img_3 = random_image(val_dir='./output/val/', val_class='7. Psoriasis pictures Lichen Planus and related diseases - 2k')
fig.add_subplot(2, 2, 4)
img4 = random_image(val_dir='./output/val/', val_class='10. Warts Molluscum and other Viral Infections - 2103')
code
1003458/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
gender_submission = pd.read_csv('../input/gender_submission.csv')
code
106195418/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import seaborn as sns
import tensorboard as tb
import tensorflow as tf
import torch
import copy
from pathlib import Path
import warnings
import holidays
import seaborn as sns
import matplotlib
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
from pytorch_lightning.loggers import TensorBoardLogger
import torch
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
from pytorch_forecasting import Baseline, TemporalFusionTransformer, TimeSeriesDataSet
from pytorch_forecasting.data import GroupNormalizer, NaNLabelEncoder
from pytorch_forecasting.metrics import SMAPE, PoissonLoss, QuantileLoss
from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters
import random
import tensorflow as tf
import tensorboard as tb
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
random.seed(30)
np.random.seed(30)
tf.random.set_seed(30)
torch.manual_seed(30)
torch.cuda.manual_seed(30)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
train['date'] = pd.to_datetime(train['date'])
test['date'] = pd.to_datetime(test['date'])
data = pd.concat([train, test], axis=0, ignore_index=True)
assert len(data.drop_duplicates(['country', 'store', 'product', 'date'])) == len(data)
assert len(data.drop_duplicates(['country', 'store', 'product'])) == len(data) // data['date'].nunique()
train.isna().sum(axis=0).rename('nans_per_column_train').rename_axis('column').reset_index().set_index('column').join(test.isna().sum(axis=0).rename('nans_per_column_test').rename_axis('column').reset_index().set_index('column'))
train.nunique(axis=0).rename('n_unique_per_column_train').rename_axis('column').reset_index().set_index('column').join(test.nunique(axis=0).rename('n_unique_per_column_test').rename_axis('column').reset_index().set_index('column'))
fig, ax = plt.subplots(1,1, figsize=(20, 6))
sns.kdeplot(data=train, x = 'num_sold', hue = 'country', fill=True, alpha = 0.15, ax = ax, linewidth=3, palette='pastel')
ax.set_xlabel('num_sold', color='black', fontweight='bold', fontsize=13)
ax.set_ylabel('density', color='black', fontweight='bold', fontsize=13)
ax.set_xlim(0, 700)
ax.xaxis.set_tick_params(labelsize=15)
ax.yaxis.set_ticklabels([])
ax.set_title('Density plot for num_sold per country (clipped at 700)', fontweight = 'bold', fontsize = 20);
fig, ax = plt.subplots(1,1, figsize=(20, 6))
sns.kdeplot(data=train, x = 'num_sold', hue = 'store', fill=True, alpha = 0.15, ax = ax, linewidth=2.5)
ax.set_xlabel('num_sold', color='black', fontweight='bold', fontsize=13)
ax.set_ylabel('density', color='black', fontweight='bold', fontsize=13)
ax.set_xlim(0, 700)
ax.set_title('Density plot for num_sold per store (clipped at 700)', fontweight = 'bold', fontsize = 20)
ax.xaxis.set_tick_params(labelsize=15)
ax.yaxis.set_ticklabels([]);
fig, ax = plt.subplots(1, 1, figsize=(20, 6))
sns.kdeplot(data=train, x='num_sold', hue='product', fill=True, alpha=0.05, ax=ax, linewidth=2.5)
ax.set_xlabel('num_sold', color='black', fontweight='bold', fontsize=13)
ax.set_ylabel('density', color='black', fontweight='bold', fontsize=13)
ax.set_xlim(0, 700)
ax.set_title('Density plot for num_sold per product (clipped at 700)', fontweight='bold', fontsize=20)
ax.xaxis.set_tick_params(labelsize=15)
ax.yaxis.set_ticklabels([])
code
106195418/cell_9
[ "image_output_1.png" ]
import pandas as pd
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
train['date'] = pd.to_datetime(train['date'])
test['date'] = pd.to_datetime(test['date'])
data = pd.concat([train, test], axis=0, ignore_index=True)
assert len(data.drop_duplicates(['country', 'store', 'product', 'date'])) == len(data)
assert len(data.drop_duplicates(['country', 'store', 'product'])) == len(data) // data['date'].nunique()
train.isna().sum(axis=0).rename('nans_per_column_train').rename_axis('column').reset_index().set_index('column').join(test.isna().sum(axis=0).rename('nans_per_column_test').rename_axis('column').reset_index().set_index('column'))
code
106195418/cell_4
[ "image_output_1.png" ]
!pip install pytorch_forecasting
code
106195418/cell_6
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import random
import tensorboard as tb
import tensorflow as tf
import torch
import copy
from pathlib import Path
import warnings
import holidays
import seaborn as sns
import matplotlib
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
from pytorch_lightning.loggers import TensorBoardLogger
import torch
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
from pytorch_forecasting import Baseline, TemporalFusionTransformer, TimeSeriesDataSet
from pytorch_forecasting.data import GroupNormalizer, NaNLabelEncoder
from pytorch_forecasting.metrics import SMAPE, PoissonLoss, QuantileLoss
from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters
import random
import tensorflow as tf
import tensorboard as tb
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
random.seed(30)
np.random.seed(30)
tf.random.set_seed(30)
torch.manual_seed(30)
torch.cuda.manual_seed(30)
code
106195418/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import seaborn as sns
import tensorboard as tb
import tensorflow as tf
import torch
import copy
from pathlib import Path
import warnings
import holidays
import seaborn as sns
import matplotlib
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
from pytorch_lightning.loggers import TensorBoardLogger
import torch
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
from pytorch_forecasting import Baseline, TemporalFusionTransformer, TimeSeriesDataSet
from pytorch_forecasting.data import GroupNormalizer, NaNLabelEncoder
from pytorch_forecasting.metrics import SMAPE, PoissonLoss, QuantileLoss
from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters
import random
import tensorflow as tf
import tensorboard as tb
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
random.seed(30)
np.random.seed(30)
tf.random.set_seed(30)
torch.manual_seed(30)
torch.cuda.manual_seed(30)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
train['date'] = pd.to_datetime(train['date'])
test['date'] = pd.to_datetime(test['date'])
data = pd.concat([train, test], axis=0, ignore_index=True)
assert len(data.drop_duplicates(['country', 'store', 'product', 'date'])) == len(data)
assert len(data.drop_duplicates(['country', 'store', 'product'])) == len(data) // data['date'].nunique()
train.isna().sum(axis=0).rename('nans_per_column_train').rename_axis('column').reset_index().set_index('column').join(test.isna().sum(axis=0).rename('nans_per_column_test').rename_axis('column').reset_index().set_index('column'))
train.nunique(axis=0).rename('n_unique_per_column_train').rename_axis('column').reset_index().set_index('column').join(test.nunique(axis=0).rename('n_unique_per_column_test').rename_axis('column').reset_index().set_index('column'))
fig, ax = plt.subplots(1, 1, figsize=(20, 6))
sns.kdeplot(data=train, x='num_sold', hue='country', fill=True, alpha=0.15, ax=ax, linewidth=3, palette='pastel')
ax.set_xlabel('num_sold', color='black', fontweight='bold', fontsize=13)
ax.set_ylabel('density', color='black', fontweight='bold', fontsize=13)
ax.set_xlim(0, 700)
ax.xaxis.set_tick_params(labelsize=15)
ax.yaxis.set_ticklabels([])
ax.set_title('Density plot for num_sold per country (clipped at 700)', fontweight='bold', fontsize=20)
code
106195418/cell_7
[ "image_output_1.png" ]
import pandas as pd
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
train['date'] = pd.to_datetime(train['date'])
test['date'] = pd.to_datetime(test['date'])
data = pd.concat([train, test], axis=0, ignore_index=True)
assert len(data.drop_duplicates(['country', 'store', 'product', 'date'])) == len(data)
assert len(data.drop_duplicates(['country', 'store', 'product'])) == len(data) // data['date'].nunique()
display(train.sample(4))
code
106195418/cell_14
[ "text_html_output_1.png" ]
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import seaborn as sns
import tensorboard as tb
import tensorflow as tf
import torch
import copy
from pathlib import Path
import warnings
import holidays
import seaborn as sns
import matplotlib
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
from pytorch_lightning.loggers import TensorBoardLogger
import torch
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
from pytorch_forecasting import Baseline, TemporalFusionTransformer, TimeSeriesDataSet
from pytorch_forecasting.data import GroupNormalizer, NaNLabelEncoder
from pytorch_forecasting.metrics import SMAPE, PoissonLoss, QuantileLoss
from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters
import random
import tensorflow as tf
import tensorboard as tb
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
random.seed(30)
np.random.seed(30)
tf.random.set_seed(30)
torch.manual_seed(30)
torch.cuda.manual_seed(30)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
train['date'] = pd.to_datetime(train['date'])
test['date'] = pd.to_datetime(test['date'])
data = pd.concat([train, test], axis=0, ignore_index=True)
assert len(data.drop_duplicates(['country', 'store', 'product', 'date'])) == len(data)
assert len(data.drop_duplicates(['country', 'store', 'product'])) == len(data) // data['date'].nunique()
train.isna().sum(axis=0).rename('nans_per_column_train').rename_axis('column').reset_index().set_index('column').join(test.isna().sum(axis=0).rename('nans_per_column_test').rename_axis('column').reset_index().set_index('column'))
train.nunique(axis=0).rename('n_unique_per_column_train').rename_axis('column').reset_index().set_index('column').join(test.nunique(axis=0).rename('n_unique_per_column_test').rename_axis('column').reset_index().set_index('column'))
fig, ax = plt.subplots(1,1, figsize=(20, 6))
sns.kdeplot(data=train, x = 'num_sold', hue = 'country', fill=True, alpha = 0.15, ax = ax, linewidth=3, palette='pastel')
ax.set_xlabel('num_sold', color='black', fontweight='bold', fontsize=13)
ax.set_ylabel('density', color='black', fontweight='bold', fontsize=13)
ax.set_xlim(0, 700)
ax.xaxis.set_tick_params(labelsize=15)
ax.yaxis.set_ticklabels([])
ax.set_title('Density plot for num_sold per country (clipped at 700)', fontweight = 'bold', fontsize = 20);
fig, ax = plt.subplots(1,1, figsize=(20, 6))
sns.kdeplot(data=train, x = 'num_sold', hue = 'store', fill=True, alpha = 0.15, ax = ax, linewidth=2.5)
ax.set_xlabel('num_sold', color='black', fontweight='bold', fontsize=13)
ax.set_ylabel('density', color='black', fontweight='bold', fontsize=13)
ax.set_xlim(0, 700)
ax.set_title('Density plot for num_sold per store (clipped at 700)', fontweight = 'bold', fontsize = 20)
ax.xaxis.set_tick_params(labelsize=15)
ax.yaxis.set_ticklabels([]);
fig, ax = plt.subplots(1,1, figsize=(20, 6))
sns.kdeplot(data=train, x = 'num_sold', hue = 'product', fill=True, alpha = 0.05, ax = ax, linewidth=2.5)
ax.set_xlabel('num_sold', color='black', fontweight='bold', fontsize=13)
ax.set_ylabel('density', color='black', fontweight='bold', fontsize=13)
ax.set_xlim(0, 700)
ax.set_title('Density plot for num_sold per product (clipped at 700)', fontweight = 'bold', fontsize = 20)
ax.xaxis.set_tick_params(labelsize=15)
ax.yaxis.set_ticklabels([]);
fig, ax = plt.subplots(1, 1, figsize=(20, 8))
sns.lineplot(x='date', y='num_sold', hue='country', data=train.groupby(['date', 'country']).num_sold.sum().rename('num_sold').reset_index().sort_values('date', ascending=True, ignore_index=True), linewidth=2, alpha=0.7)
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=120))
ax.set_xlabel('date', color='black', fontweight='bold', fontsize=13)
ax.set_ylabel('num_sold', color='black', fontweight='bold', fontsize=13)
ax.legend(fontsize=20, loc='upper left')
ax.set_title('num_sold per Country and Date', fontweight='bold', fontsize=20)
code
106195418/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
train['date'] = pd.to_datetime(train['date'])
test['date'] = pd.to_datetime(test['date'])
data = pd.concat([train, test], axis=0, ignore_index=True)
assert len(data.drop_duplicates(['country', 'store', 'product', 'date'])) == len(data)
assert len(data.drop_duplicates(['country', 'store', 'product'])) == len(data) // data['date'].nunique()
train.isna().sum(axis=0).rename('nans_per_column_train').rename_axis('column').reset_index().set_index('column').join(test.isna().sum(axis=0).rename('nans_per_column_test').rename_axis('column').reset_index().set_index('column'))
train.nunique(axis=0).rename('n_unique_per_column_train').rename_axis('column').reset_index().set_index('column').join(test.nunique(axis=0).rename('n_unique_per_column_test').rename_axis('column').reset_index().set_index('column'))
code
106195418/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import seaborn as sns
import tensorboard as tb
import tensorflow as tf
import torch
import copy
from pathlib import Path
import warnings
import holidays
import seaborn as sns
import matplotlib
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
from pytorch_lightning.loggers import TensorBoardLogger
import torch
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
from pytorch_forecasting import Baseline, TemporalFusionTransformer, TimeSeriesDataSet
from pytorch_forecasting.data import GroupNormalizer, NaNLabelEncoder
from pytorch_forecasting.metrics import SMAPE, PoissonLoss, QuantileLoss
from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters
import random
import tensorflow as tf
import tensorboard as tb
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
random.seed(30)
np.random.seed(30)
tf.random.set_seed(30)
torch.manual_seed(30)
torch.cuda.manual_seed(30)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
train['date'] = pd.to_datetime(train['date'])
test['date'] = pd.to_datetime(test['date'])
data = pd.concat([train, test], axis=0, ignore_index=True)
assert len(data.drop_duplicates(['country', 'store', 'product', 'date'])) == len(data)
assert len(data.drop_duplicates(['country', 'store', 'product'])) == len(data) // data['date'].nunique()
train.isna().sum(axis=0).rename('nans_per_column_train').rename_axis('column').reset_index().set_index('column').join(test.isna().sum(axis=0).rename('nans_per_column_test').rename_axis('column').reset_index().set_index('column'))
train.nunique(axis=0).rename('n_unique_per_column_train').rename_axis('column').reset_index().set_index('column').join(test.nunique(axis=0).rename('n_unique_per_column_test').rename_axis('column').reset_index().set_index('column'))
fig, ax = plt.subplots(1,1, figsize=(20, 6))
sns.kdeplot(data=train, x = 'num_sold', hue = 'country', fill=True, alpha = 0.15, ax = ax, linewidth=3, palette='pastel')
ax.set_xlabel('num_sold', color='black', fontweight='bold', fontsize=13)
ax.set_ylabel('density', color='black', fontweight='bold', fontsize=13)
ax.set_xlim(0, 700)
ax.xaxis.set_tick_params(labelsize=15)
ax.yaxis.set_ticklabels([])
ax.set_title('Density plot for num_sold per country (clipped at 700)', fontweight = 'bold', fontsize = 20);
fig, ax = plt.subplots(1, 1, figsize=(20, 6))
sns.kdeplot(data=train, x='num_sold', hue='store', fill=True, alpha=0.15, ax=ax, linewidth=2.5)
ax.set_xlabel('num_sold', color='black', fontweight='bold', fontsize=13)
ax.set_ylabel('density', color='black', fontweight='bold', fontsize=13)
ax.set_xlim(0, 700)
ax.set_title('Density plot for num_sold per store (clipped at 700)', fontweight='bold', fontsize=20)
ax.xaxis.set_tick_params(labelsize=15)
ax.yaxis.set_ticklabels([])
code
106195418/cell_5
[ "image_output_1.png" ]
!pip install holidays
code
74046791/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
data = pd.read_csv('insurance.csv')
data.head()
code
1004487/cell_4
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sqlite3
conn = sqlite3.connect('../input/database.sqlite')
score_query = '\nSELECT reviewid, score\nFROM reviews\n'
score_df = pd.read_sql_query(score_query, conn)
genre_query = '\nSELECT *\nFROM genres\n'
genre_df = pd.read_sql_query(genre_query, conn)
conn.close()
genre_df.fillna(value='Not specified', inplace=True)
grouped = genre_df.groupby('reviewid')
genre_df = grouped.aggregate(lambda x: set(x))
result = score_df.join(genre_df, on='reviewid')
assert len(score_df) == len(result)
popmean = result['score'].mean()
plt.hist(score_df['score'])
plt.show()
code
1004487/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sqlite3
conn = sqlite3.connect('../input/database.sqlite')
score_query = '\nSELECT reviewid, score\nFROM reviews\n'
score_df = pd.read_sql_query(score_query, conn)
genre_query = '\nSELECT *\nFROM genres\n'
genre_df = pd.read_sql_query(genre_query, conn)
conn.close()
genre_df.fillna(value='Not specified', inplace=True)
grouped = genre_df.groupby('reviewid')
genre_df = grouped.aggregate(lambda x: set(x))
result = score_df.join(genre_df, on='reviewid')
assert len(score_df) == len(result)
popmean = result['score'].mean()
means_and_counts = result.groupby(result['genre'].apply(tuple))['score'].agg(['count', 'mean'])
assert means_and_counts['count'].sum() == len(result)
means_and_counts = means_and_counts.sort_values('mean', ascending=False)
means_and_counts = means_and_counts[means_and_counts['count'] > 50].reset_index()
print(means_and_counts)
code
1004487/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import sqlite3
import matplotlib.pyplot as plt
import scipy.stats as stats
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
1004487/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as stats
import sqlite3
conn = sqlite3.connect('../input/database.sqlite')
score_query = '\nSELECT reviewid, score\nFROM reviews\n'
score_df = pd.read_sql_query(score_query, conn)
genre_query = '\nSELECT *\nFROM genres\n'
genre_df = pd.read_sql_query(genre_query, conn)
conn.close()
genre_df.fillna(value='Not specified', inplace=True)
grouped = genre_df.groupby('reviewid')
genre_df = grouped.aggregate(lambda x: set(x))
result = score_df.join(genre_df, on='reviewid')
assert len(score_df) == len(result)
popmean = result['score'].mean()
means_and_counts = result.groupby(result['genre'].apply(tuple))['score'].agg(['count', 'mean'])
assert means_and_counts['count'].sum() == len(result)
means_and_counts = means_and_counts.sort_values('mean', ascending=False)
means_and_counts = means_and_counts[means_and_counts['count'] > 50].reset_index()
data = []
for index, row in means_and_counts.iterrows():
    data.append(result[result['genre'].apply(tuple) == row['genre']].score.tolist())
stat, pvalue = stats.f_oneway(*data)
print('One-way ANOVA on genre values:')
print('F-stat: %f, p-value: %f' % (stat, pvalue))
code
1004487/cell_3
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sqlite3
conn = sqlite3.connect('../input/database.sqlite')
score_query = '\nSELECT reviewid, score\nFROM reviews\n'
score_df = pd.read_sql_query(score_query, conn)
genre_query = '\nSELECT *\nFROM genres\n'
genre_df = pd.read_sql_query(genre_query, conn)
conn.close()
genre_df.fillna(value='Not specified', inplace=True)
grouped = genre_df.groupby('reviewid')
genre_df = grouped.aggregate(lambda x: set(x))
result = score_df.join(genre_df, on='reviewid')
assert len(score_df) == len(result)
popmean = result['score'].mean()
print('Mean of %d reviews: %f' % (result['reviewid'].count(), popmean))
print('Standard deviation of reviews: %f' % result['score'].std(ddof=0))
code
1004487/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as stats
import sqlite3
conn = sqlite3.connect('../input/database.sqlite')
score_query = '\nSELECT reviewid, score\nFROM reviews\n'
score_df = pd.read_sql_query(score_query, conn)
genre_query = '\nSELECT *\nFROM genres\n'
genre_df = pd.read_sql_query(genre_query, conn)
conn.close()
genre_df.fillna(value='Not specified', inplace=True)
grouped = genre_df.groupby('reviewid')
genre_df = grouped.aggregate(lambda x: set(x))
result = score_df.join(genre_df, on='reviewid')
assert len(score_df) == len(result)
popmean = result['score'].mean()
means_and_counts = result.groupby(result['genre'].apply(tuple))['score'].agg(['count', 'mean'])
assert means_and_counts['count'].sum() == len(result)
means_and_counts = means_and_counts.sort_values('mean', ascending=False)
means_and_counts = means_and_counts[means_and_counts['count'] > 50].reset_index()
data = []
for index, row in means_and_counts.iterrows():
    data.append(result[result['genre'].apply(tuple) == row['genre']].score.tolist())
stat, pvalue = stats.f_oneway(*data)
t_tests_headers = ['genre', 't', 'prob', 'Reject_Null']
t_tests = pd.DataFrame(index=range(0, len(data)), columns=t_tests_headers)
for index in range(len(data)):
    gs = ', '.join(means_and_counts['genre'][index])
    t_tests['genre'][index] = gs
    t, prob = stats.ttest_1samp(data[index], popmean)
    t_tests['t'][index] = t
    t_tests['prob'][index] = prob
    if prob < 0.05:
        t_tests['Reject_Null'][index] = True
    else:
        t_tests['Reject_Null'][index] = False
print(t_tests.sort_values('t'))
code
34123490/cell_7
[ "text_plain_output_1.png" ]
from scipy import stats
avg_weights = [33, 34, 35, 36, 32, 28, 29, 30, 31, 37, 36, 35, 33, 34, 31, 40, 24]
stats.ttest_1samp(avg_weights, 35)
code
34123490/cell_17
[ "text_plain_output_1.png" ]
from scipy import stats
avg_weights = [33, 34, 35, 36, 32, 28, 29, 30, 31, 37, 36, 35, 33, 34, 31, 40, 24]
stats.ttest_1samp(avg_weights, 35)
avg_weights1 = [29, 31, 28, 33, 31, 34, 32, 20, 32, 28, 27, 26, 30, 31, 34, 30]
stats.ttest_ind(avg_weights, avg_weights1)
before_meta = [68, 45, 46, 34, 23, 67, 80, 120, 34, 54, 68]
after_meta = [28, 25, 26, 24, 13, 37, 30, 30, 54, 34, 38]
stats.ttest_rel(before_meta, after_meta)
code
34123490/cell_12
[ "text_plain_output_1.png" ]
from scipy import stats
avg_weights = [33, 34, 35, 36, 32, 28, 29, 30, 31, 37, 36, 35, 33, 34, 31, 40, 24]
stats.ttest_1samp(avg_weights, 35)
avg_weights1 = [29, 31, 28, 33, 31, 34, 32, 20, 32, 28, 27, 26, 30, 31, 34, 30]
stats.ttest_ind(avg_weights, avg_weights1)
code
88086201/cell_13
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
root_path = '/kaggle/input/medieval-chant-corpus'
df_path = f'{root_path}/mh-corpus.json'
df = pd.read_json(df_path)
df.drop(['rawTextMusic', 'rawText'], axis=1, inplace=True)
from sklearn.feature_extraction.text import TfidfVectorizer
maxx_features = 2 ** 12
vectorizer = TfidfVectorizer(max_features=maxx_features)
X = vectorizer.fit_transform(df['music_as_words'].values)
code
88086201/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
root_path = '/kaggle/input/medieval-chant-corpus'
df_path = f'{root_path}/mh-corpus.json'
df = pd.read_json(df_path)
df.drop(['rawTextMusic', 'rawText'], axis=1, inplace=True)
df.info()
code
88086201/cell_23
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
root_path = '/kaggle/input/medieval-chant-corpus'
df_path = f'{root_path}/mh-corpus.json'
df = pd.read_json(df_path)
df.drop(['rawTextMusic', 'rawText'], axis=1, inplace=True)
from sklearn.feature_extraction.text import TfidfVectorizer
maxx_features = 2 ** 12
vectorizer = TfidfVectorizer(max_features=maxx_features)
X = vectorizer.fit_transform(df['music_as_words'].values)
from sklearn.decomposition import PCA
pca = PCA(n_components=0.95, random_state=42)
X_reduced = pca.fit_transform(X.toarray())
X_reduced.shape
from sklearn.manifold import TSNE
tsne = TSNE(verbose=1, perplexity=50)
X_embedded = tsne.fit_transform(X.toarray())
from matplotlib import pyplot as plt
import seaborn as sns
sns.set(rc={'figure.figsize': (15, 15)})
palette = sns.color_palette('bright', 1)
sns.scatterplot(X_embedded[:, 0], X_embedded[:, 1], palette=palette)
plt.title('t-SNE with no Labels')
plt.savefig('t-sne_chants.png')
plt.show()
code
88086201/cell_33
[ "text_html_output_1.png" ]
from sklearn.cluster import KMeans
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
root_path = '/kaggle/input/medieval-chant-corpus'
df_path = f'{root_path}/mh-corpus.json'
df = pd.read_json(df_path)
df.drop(['rawTextMusic', 'rawText'], axis=1, inplace=True)
from sklearn.feature_extraction.text import TfidfVectorizer
maxx_features = 2 ** 12
vectorizer = TfidfVectorizer(max_features=maxx_features)
X = vectorizer.fit_transform(df['music_as_words'].values)
from sklearn.decomposition import PCA
pca = PCA(n_components=0.95, random_state=42)
X_reduced = pca.fit_transform(X.toarray())
X_reduced.shape
k = 15
kmeans = KMeans(n_clusters=k, random_state=42)
y_pred = kmeans.fit_predict(X_reduced)
df['y'] = y_pred
vectorizers = []
for ii in range(0, k):
    vectorizers.append(CountVectorizer(min_df=5, max_df=0.9, stop_words='english', lowercase=True, token_pattern='[a-zA-Z\\-][a-zA-Z\\-]{2,}'))
vectorized_data = []
for current_cluster, cvec in enumerate(vectorizers):
    try:
        vectorized_data.append(cvec.fit_transform(df.loc[df['y'] == current_cluster, 'music_as_words']))
    except Exception as e:
        vectorized_data.append(None)
NUM_TOPICS_PER_CLUSTER = 20
lda_models = []
for ii in range(0, k):
    lda = LatentDirichletAllocation(n_components=NUM_TOPICS_PER_CLUSTER, max_iter=10, learning_method='online', verbose=False, random_state=42)
    lda_models.append(lda)
lda_models[0]
clusters_lda_data = []
for current_cluster, lda in enumerate(lda_models):
    print('Current Cluster: ' + str(current_cluster))
    if vectorized_data[current_cluster] != None:
        clusters_lda_data.append(lda.fit_transform(vectorized_data[current_cluster]))
code
88086201/cell_20
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import TSNE
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
root_path = '/kaggle/input/medieval-chant-corpus'
df_path = f'{root_path}/mh-corpus.json'
df = pd.read_json(df_path)
df.drop(['rawTextMusic', 'rawText'], axis=1, inplace=True)
from sklearn.feature_extraction.text import TfidfVectorizer
maxx_features = 2 ** 12
vectorizer = TfidfVectorizer(max_features=maxx_features)
X = vectorizer.fit_transform(df['music_as_words'].values)
from sklearn.decomposition import PCA
pca = PCA(n_components=0.95, random_state=42)
X_reduced = pca.fit_transform(X.toarray())
X_reduced.shape
from sklearn.manifold import TSNE
tsne = TSNE(verbose=1, perplexity=50)
X_embedded = tsne.fit_transform(X.toarray())
code
88086201/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
root_path = '/kaggle/input/medieval-chant-corpus'
df_path = f'{root_path}/mh-corpus.json'
df = pd.read_json(df_path)
df.drop(['rawTextMusic', 'rawText'], axis=1, inplace=True)

def joinSyllable(c):
    out = ''
    for doc in c:
        out += ' '.join(doc)
    return out
df['music_as_words'] = list(map(joinSyllable, df['music']))
df['music_as_words']
code
88086201/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
root_path = '/kaggle/input/medieval-chant-corpus'
df_path = f'{root_path}/mh-corpus.json'
df = pd.read_json(df_path)
df.drop(['rawTextMusic', 'rawText'], axis=1, inplace=True)
df.head()
code
88086201/cell_32
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn.cluster import KMeans
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
root_path = '/kaggle/input/medieval-chant-corpus'
df_path = f'{root_path}/mh-corpus.json'
df = pd.read_json(df_path)
df.drop(['rawTextMusic', 'rawText'], axis=1, inplace=True)
from sklearn.feature_extraction.text import TfidfVectorizer
maxx_features = 2 ** 12
vectorizer = TfidfVectorizer(max_features=maxx_features)
X = vectorizer.fit_transform(df['music_as_words'].values)
from sklearn.decomposition import PCA
pca = PCA(n_components=0.95, random_state=42)
X_reduced = pca.fit_transform(X.toarray())
X_reduced.shape
k = 15
kmeans = KMeans(n_clusters=k, random_state=42)
y_pred = kmeans.fit_predict(X_reduced)
df['y'] = y_pred
NUM_TOPICS_PER_CLUSTER = 20
lda_models = []
for ii in range(0, k):
    lda = LatentDirichletAllocation(n_components=NUM_TOPICS_PER_CLUSTER, max_iter=10, learning_method='online', verbose=False, random_state=42)
    lda_models.append(lda)
lda_models[0]
code
88086201/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
root_path = '/kaggle/input/medieval-chant-corpus'
df_path = f'{root_path}/mh-corpus.json'
df = pd.read_json(df_path)
df.drop(['rawTextMusic', 'rawText'], axis=1, inplace=True)
from sklearn.feature_extraction.text import TfidfVectorizer
maxx_features = 2 ** 12
vectorizer = TfidfVectorizer(max_features=maxx_features)
X = vectorizer.fit_transform(df['music_as_words'].values)
from sklearn.decomposition import PCA
pca = PCA(n_components=0.95, random_state=42)
X_reduced = pca.fit_transform(X.toarray())
X_reduced.shape
code
88086201/cell_35
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
root_path = '/kaggle/input/medieval-chant-corpus'
df_path = f'{root_path}/mh-corpus.json'
df = pd.read_json(df_path)
df.drop(['rawTextMusic', 'rawText'], axis=1, inplace=True)
from sklearn.feature_extraction.text import TfidfVectorizer
maxx_features = 2 ** 12
vectorizer = TfidfVectorizer(max_features=maxx_features)
X = vectorizer.fit_transform(df['music_as_words'].values)
vectorizer.get_feature_names()[:10]
from sklearn.decomposition import PCA
pca = PCA(n_components=0.95, random_state=42)
X_reduced = pca.fit_transform(X.toarray())
X_reduced.shape
k = 15
kmeans = KMeans(n_clusters=k, random_state=42)
y_pred = kmeans.fit_predict(X_reduced)
df['y'] = y_pred
vectorizers = []
for ii in range(0, k):
    vectorizers.append(CountVectorizer(min_df=5, max_df=0.9, stop_words='english', lowercase=True, token_pattern='[a-zA-Z\\-][a-zA-Z\\-]{2,}'))
vectorized_data = []
for current_cluster, cvec in enumerate(vectorizers):
    try:
        vectorized_data.append(cvec.fit_transform(df.loc[df['y'] == current_cluster, 'music_as_words']))
    except Exception as e:
        vectorized_data.append(None)
NUM_TOPICS_PER_CLUSTER = 20
lda_models = []
for ii in range(0, k):
    lda = LatentDirichletAllocation(n_components=NUM_TOPICS_PER_CLUSTER, max_iter=10, learning_method='online', verbose=False, random_state=42)
    lda_models.append(lda)
lda_models[0]
clusters_lda_data = []
for current_cluster, lda in enumerate(lda_models):
    if vectorized_data[current_cluster] != None:
        clusters_lda_data.append(lda.fit_transform(vectorized_data[current_cluster]))

def selected_topics(model, vectorizer, top_n=3):
    current_words = []
    keywords = []
    for idx, topic in enumerate(model.components_):
        words = [(vectorizer.get_feature_names()[i], topic[i]) for i in topic.argsort()[:-top_n - 1:-1]]
        for word in words:
            if word[0] not in current_words:
                keywords.append(word)
                current_words.append(word[0])
    keywords.sort(key=lambda x: x[1])
    keywords.reverse()
    return_values = []
    for ii in keywords:
        return_values.append(ii[0])
    return return_values
all_keywords = []
for current_vectorizer, lda in enumerate(lda_models):
    print('Current Cluster: ' + str(current_vectorizer))
    if vectorized_data[current_vectorizer] != None:
        all_keywords.append(selected_topics(lda, vectorizers[current_vectorizer]))
code
88086201/cell_24
[ "text_plain_output_1.png" ]
from matplotlib import pyplot as plt
import seaborn as sns
sns.set(rc={'figure.figsize': (13, 9)})
palette = sns.hls_palette(k, l=0.4, s=0.9)
sns.scatterplot(X_embedded[:, 0], X_embedded[:, 1], hue=y_pred, legend='full', palette=palette)
plt.title('t-SNE with Kmeans Labels')
plt.savefig('improved_cluster_tsne.png')
plt.show()
code
88086201/cell_14
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
root_path = '/kaggle/input/medieval-chant-corpus'
df_path = f'{root_path}/mh-corpus.json'
df = pd.read_json(df_path)
df.drop(['rawTextMusic', 'rawText'], axis=1, inplace=True)
from sklearn.feature_extraction.text import TfidfVectorizer
maxx_features = 2 ** 12
vectorizer = TfidfVectorizer(max_features=maxx_features)
X = vectorizer.fit_transform(df['music_as_words'].values)
vectorizer.get_feature_names()[:10]
code
88086201/cell_27
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
root_path = '/kaggle/input/medieval-chant-corpus'
df_path = f'{root_path}/mh-corpus.json'
df = pd.read_json(df_path)
df.drop(['rawTextMusic', 'rawText'], axis=1, inplace=True)
df[df['y'] == 1]
code
88086201/cell_36
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
root_path = '/kaggle/input/medieval-chant-corpus'
df_path = f'{root_path}/mh-corpus.json'
df = pd.read_json(df_path)
df.drop(['rawTextMusic', 'rawText'], axis=1, inplace=True)
from sklearn.feature_extraction.text import TfidfVectorizer
maxx_features = 2 ** 12
vectorizer = TfidfVectorizer(max_features=maxx_features)
X = vectorizer.fit_transform(df['music_as_words'].values)
vectorizer.get_feature_names()[:10]
from sklearn.decomposition import PCA
pca = PCA(n_components=0.95, random_state=42)
X_reduced = pca.fit_transform(X.toarray())
X_reduced.shape
k = 15
kmeans = KMeans(n_clusters=k, random_state=42)
y_pred = kmeans.fit_predict(X_reduced)
df['y'] = y_pred
vectorizers = []
for ii in range(0, k):
    vectorizers.append(CountVectorizer(min_df=5, max_df=0.9, stop_words='english', lowercase=True, token_pattern='[a-zA-Z\\-][a-zA-Z\\-]{2,}'))
vectorized_data = []
for current_cluster, cvec in enumerate(vectorizers):
    try:
        vectorized_data.append(cvec.fit_transform(df.loc[df['y'] == current_cluster, 'music_as_words']))
    except Exception as e:
        vectorized_data.append(None)
NUM_TOPICS_PER_CLUSTER = 20
lda_models = []
for ii in range(0, k):
    lda = LatentDirichletAllocation(n_components=NUM_TOPICS_PER_CLUSTER, max_iter=10, learning_method='online', verbose=False, random_state=42)
    lda_models.append(lda)
lda_models[0]
clusters_lda_data = []
for current_cluster, lda in enumerate(lda_models):
    if vectorized_data[current_cluster] != None:
        clusters_lda_data.append(lda.fit_transform(vectorized_data[current_cluster]))

def selected_topics(model, vectorizer, top_n=3):
    current_words = []
    keywords = []
    for idx, topic in enumerate(model.components_):
        words = [(vectorizer.get_feature_names()[i], topic[i]) for i in topic.argsort()[:-top_n - 1:-1]]
        for word in words:
            if word[0] not in current_words:
                keywords.append(word)
                current_words.append(word[0])
    keywords.sort(key=lambda x: x[1])
    keywords.reverse()
    return_values = []
    for ii in keywords:
        return_values.append(ii[0])
    return return_values
all_keywords = []
for current_vectorizer, lda in enumerate(lda_models):
    if vectorized_data[current_vectorizer] != None:
        all_keywords.append(selected_topics(lda, vectorizers[current_vectorizer]))
all_keywords[0][:10]
code
90124932/cell_9
[ "text_plain_output_1.png" ]
from keras.models import Sequential,Model,load_model,Input
from keras_preprocessing.image import ImageDataGenerator
import math
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import tensorflow_addons as tfa
import pandas as pd
import os
from glob import glob
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
from keras.models import Model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from glob import glob
import numpy as np
from keras import regularizers
from keras.models import Sequential, Model, load_model, Input
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, GlobalAveragePooling2D
from keras_preprocessing.image import ImageDataGenerator
import keras.layers as Layers
from keras.callbacks import EarlyStopping, ModelCheckpoint
import keras.optimizers as Optimizer
from keras import applications
from tensorflow import keras
import math
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import tensorflow_addons as tfa
train_img_csv = '../input/mura-dataset/MURA-v1.1/train_image_paths.csv'
train_images_paths = pd.read_csv(os.path.join(train_img_csv), dtype=str, header=None)
train_images_paths.columns = ['image_path']
train_images_paths['label'] = train_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
train_images_paths['category'] = train_images_paths['image_path'].apply(lambda x: x.split('/')[2])
valid_img_csv = '../input/testdata/abdekho_valid.csv'
valid_images_paths = pd.read_csv(os.path.join(valid_img_csv), dtype=str, header=None)
valid_images_paths.columns = ['image_path']
valid_images_paths['label'] = valid_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
valid_images_paths['category'] = valid_images_paths['image_path'].apply(lambda x: x.split('/')[2])
train_images_paths_XR_ELBOW = train_images_paths[train_images_paths['category'] == 'XR_ELBOW']
valid_images_paths_XR_ELBOW = valid_images_paths[valid_images_paths['category'] == 'XR_ELBOW']
train_images_paths_XR_FINGER = train_images_paths[train_images_paths['category'] == 'XR_FINGER']
valid_images_paths_XR_FINGER = valid_images_paths[valid_images_paths['category'] == 'XR_FINGER']
train_images_paths_XR_FOREARM = train_images_paths[train_images_paths['category'] == 'XR_FOREARM']
valid_images_paths_XR_FOREARM = valid_images_paths[valid_images_paths['category'] == 'XR_FOREARM']
train_images_paths_XR_HAND = train_images_paths[train_images_paths['category'] == 'XR_HAND']
valid_images_paths_XR_HAND = valid_images_paths[valid_images_paths['category'] == 'XR_HAND']
train_images_paths_XR_HUMERUS = train_images_paths[train_images_paths['category'] == 'XR_HUMERUS']
valid_images_paths_XR_HUMERUS = valid_images_paths[valid_images_paths['category'] == 'XR_HUMERUS']
train_images_paths_XR_SHOULDER = train_images_paths[train_images_paths['category'] == 'XR_SHOULDER']
valid_images_paths_XR_SHOULDER = valid_images_paths[valid_images_paths['category'] == 'XR_SHOULDER']
train_images_paths_XR_WRIST = train_images_paths[train_images_paths['category'] == 'XR_WRIST']
valid_images_paths_XR_WRIST = valid_images_paths[valid_images_paths['category'] == 'XR_WRIST']
datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=10, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=10)
images_path_dir = '../input/mura-dataset'
batchsize = 32
targetsize = (224, 224)
classmode = 'binary'
train_generator = datagen.flow_from_dataframe(dataframe=train_images_paths, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator = test_datagen.flow_from_dataframe(dataframe=valid_images_paths, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_ELBOW = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_ELBOW, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_ELBOW = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_ELBOW, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_FINGER = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_FINGER = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_FOREARM = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_FOREARM, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_FOREARM = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_FOREARM, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_HAND = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_HAND, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_HAND = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_HAND, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_HUMERUS = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_HUMERUS, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_HUMERUS = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_HUMERUS, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_SHOULDER = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_SHOULDER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_SHOULDER = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_SHOULDER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_WRIST = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_WRIST, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_WRIST = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_WRIST, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
input_image = Input(shape=(224, 224, 3), name='original_img')
dense_model_1 = tf.keras.applications.DenseNet169(include_top=False, weights='imagenet')
dense_model_1.trainable = True
for layer in dense_model_1.layers[:350]:
    layer.trainable = False
x = dense_model_1(input_image)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(81, activation='relu')(x)
x = tf.keras.layers.Dense(81, activation='relu')(x)
x = tf.keras.layers.Dense(42, activation='relu')(x)
preds_dense_net = tf.keras.layers.Dense(1, activation='sigmoid')(x)
dense_model_2 = tf.keras.applications.Xception(weights='imagenet', include_top=False)
dense_model_2.trainable = True
for layer in dense_model_2.layers[:116]:
    layer.trainable = False
y = dense_model_2(input_image)
y = tf.keras.layers.GlobalAveragePooling2D()(y)
y = tf.keras.layers.Dense(81, activation='relu')(y)
y = tf.keras.layers.Dense(81, activation='relu')(y)
y = tf.keras.layers.Dense(42, activation='relu')(y)
preds_resnet_net = tf.keras.layers.Dense(1, activation='sigmoid')(y)
dense_model_3 = tf.keras.applications.MobileNet(include_top=False, weights='imagenet')
dense_model_3.trainable = True
for layer in dense_model_3.layers[:70]:
    layer.trainable = False
z = dense_model_3(input_image)
z = tf.keras.layers.GlobalAveragePooling2D()(z)
z = tf.keras.layers.Dense(81, activation='relu')(z)
z = tf.keras.layers.Dense(81, activation='relu')(z)
z = tf.keras.layers.Dense(42, activation='relu')(z)
preds_mobi_net = tf.keras.layers.Dense(1, activation='sigmoid')(z)
mean_nn_only = tf.reduce_mean(tf.stack([preds_mobi_net, preds_resnet_net, preds_dense_net], axis=0), axis=0)
model = tf.keras.models.Model(input_image, mean_nn_only)
STEP_SIZE_TRAIN = math.ceil(train_generator.n / train_generator.batch_size)
STEP_SIZE_VALID = math.ceil(valid_generator.n / valid_generator.batch_size)
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01), loss='binary_crossentropy', metrics=['accuracy', tfa.metrics.CohenKappa(num_classes=2), tf.keras.metrics.Precision(0.6), tf.keras.metrics.Recall(0.3), tf.keras.metrics.AUC()])
history = model.fit_generator(train_generator, epochs=20, verbose=1, steps_per_epoch=STEP_SIZE_TRAIN, validation_data=valid_generator, validation_steps=STEP_SIZE_VALID)
print('\nOverAll\n\n')
loss, acc, kappa, pre, recal, auc = model.evaluate(train_generator, verbose=1)
print('accuracy overall: %.3f' % acc)
print(' kappa overall: %.3f' % kappa)
print(' precision overall: %.3f' % pre)
print('recall overall: %.3f' % recal)
print('AUC overall: %.3f' % auc)
loss1, acc1, kappa1, pre1, recal1, auc1 = model.evaluate(valid_generator, verbose=1)
print('accuracy valid: %.3f' % acc1)
print(' kappa valid: %.3f' % kappa1)
print(' precision valid: %.3f' % pre1)
print('recall valid: %.3f' % recal1)
print('AUC valid: %.3f' % auc1)
print('\n\n_XR_ELBOW\n\n')
loss, acc, kappa, pre, recal, auc = model.evaluate(train_generator_XR_ELBOW, verbose=1)
print('accuracy overall: %.3f' % acc)
print(' kappa overall: %.3f' % kappa)
print(' precision overall: %.3f' % pre)
print('recall overall: %.3f' % recal)
print('AUC overall: %.3f' % auc)
loss1, acc1, kappa1, pre1, recal1, auc1 = model.evaluate(valid_generator_XR_ELBOW, verbose=1)
print('accuracy valid: %.3f' % acc1)
print(' kappa valid: %.3f' % kappa1)
print(' precision valid: %.3f' % pre1)
print('recall valid: %.3f' % recal1)
print('AUC valid: %.3f' % auc1)
print('\n\n_XR_FINGER\n\n')
loss, acc, kappa, pre, recal, auc = model.evaluate(train_generator_XR_FINGER, verbose=1)
print('accuracy overall: %.3f' % acc)
print(' kappa overall: %.3f' % kappa)
print(' precision overall: %.3f' % pre)
print('recall overall: %.3f' % recal)
print('AUC overall: %.3f' % auc)
loss1, acc1, kappa1, pre1, recal1, auc1 = model.evaluate(valid_generator_XR_FINGER, verbose=1)
print('accuracy valid: %.3f' % acc1)
print(' kappa valid: %.3f' % kappa1)
print(' precision valid: %.3f' % pre1)
print('recall valid: %.3f' % recal1)
print('AUC valid: %.3f' % auc1)
print('\n\n_XR_FOREARM\n\n')
loss, acc, kappa, pre, recal, auc = model.evaluate(train_generator_XR_FOREARM, verbose=1)
print('accuracy overall: %.3f' % acc)
print(' kappa overall: %.3f' % kappa)
print(' precision overall: %.3f' % pre)
print('recall overall: %.3f' % recal)
print('AUC overall: %.3f' % auc)
loss1, acc1, kappa1, pre1, recal1, auc1 = model.evaluate(valid_generator_XR_FOREARM, verbose=1)
print('accuracy valid: %.3f' % acc1)
print(' kappa valid: %.3f' % kappa1)
print(' precision valid: %.3f' % pre1)
print('recall valid: %.3f' % recal1)
print('AUC valid: %.3f' % auc1)
print('\n\n_XR_HAND\n\n')
loss, acc, kappa, pre, recal, auc = model.evaluate(train_generator_XR_HAND, verbose=1)
print('accuracy overall: %.3f' % acc)
print(' kappa overall: %.3f' % kappa)
print(' precision overall: %.3f' % pre)
print('recall overall: %.3f' % recal)
print('AUC overall: %.3f' % auc)
loss1, acc1, kappa1, pre1, recal1, auc1 = model.evaluate(valid_generator_XR_HAND, verbose=1)
print('accuracy valid: %.3f' % acc1)
print(' kappa valid: %.3f' % kappa1)
print(' precision valid: %.3f' % pre1)
print('recall valid: %.3f' % recal1)
print('AUC valid: %.3f' % auc1)
print('\n\n_XR_HUMERUS\n\n')
loss, acc, kappa, pre, recal, auc = model.evaluate(train_generator_XR_HUMERUS, verbose=1)
print('accuracy overall: %.3f' % acc)
print(' kappa overall: %.3f' % kappa)
print(' precision overall: %.3f' % pre)
print('recall overall: %.3f' % recal)
print('AUC overall: %.3f' % auc)
loss1, acc1, kappa1, pre1, recal1, auc1 = model.evaluate(valid_generator_XR_HUMERUS, verbose=1)
print('accuracy valid: %.3f' % acc1)
print(' kappa valid: %.3f' % kappa1)
print(' precision valid: %.3f' % pre1)
print('recall valid: %.3f' % recal1)
print('AUC valid: %.3f' % auc1)
print('\n\n_XR_SHOULDER\n\n')
loss, acc, kappa, pre, recal, auc = model.evaluate(train_generator_XR_SHOULDER, verbose=1)
print('accuracy overall: %.3f' % acc)
print(' kappa overall: %.3f' % kappa)
print(' precision overall: %.3f' % pre)
print('recall overall: %.3f' % recal)
print('AUC overall: %.3f' % auc)
loss1, acc1, kappa1, pre1, recal1, auc1 = model.evaluate(valid_generator_XR_SHOULDER, verbose=1)
print('accuracy valid: %.3f' % acc1)
print(' kappa valid: %.3f' % kappa1)
print(' precision valid: %.3f' % pre1)
print('recall valid: %.3f' % recal1)
print('AUC valid: %.3f' % auc1)
print('\n\n_XR_WRIST\n\n')
loss, acc, kappa, pre, recal, auc = model.evaluate(train_generator_XR_WRIST, verbose=1)
print('accuracy overall: %.3f' % acc)
print(' kappa overall: %.3f' % kappa)
print(' precision overall: %.3f' % pre)
print('recall overall: %.3f' % recal)
print('AUC overall: %.3f' % auc)
loss1, acc1, kappa1, pre1, recal1, auc1 = model.evaluate(valid_generator_XR_WRIST, verbose=1)
print('accuracy valid: %.3f' % acc1)
print(' kappa valid: %.3f' % kappa1)
print(' precision valid: %.3f' % pre1)
print('recall valid: %.3f' % recal1)
print('AUC valid: %.3f' % auc1)
code