path: string (lengths 13-17)
screenshot_names: sequence (lengths 1-873)
code: string (lengths 0-40.4k)
cell_type: string (1 class)
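The four columns above describe a flat table of notebook cells, one row per cell. As a minimal sketch of how such a table could be inspected with pandas (assuming the records below were exported to a file named cells.parquet, which is a hypothetical name, not something stated in this dump):

import pandas as pd

# Hypothetical export of the records listed below; the filename is an assumption.
df = pd.read_parquet('cells.parquet')

print(df.shape)                           # one row per notebook cell
print(df['cell_type'].value_counts())     # expected to contain the single class 'code'
print(df['code'].str.len().describe())    # code lengths range from 0 to roughly 40.4k characters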
17134452/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode
import advertools as adv
import pandas as pd
import advertools as adv
import pandas as pd
pd.options.display.max_columns = None
from plotly.tools import make_subplots
import plotly.graph_objs as go
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
adv.__version__
column_key = pd.read_csv('../input/column_key.csv')
column_key
clubs = pd.read_csv('../input/clubs.csv')
serp_clubs = pd.read_csv('../input/serp_clubs.csv')
serp_clubs['totalResults'] = serp_clubs['totalResults'].astype('int')
serp_clubs['queryTime'] = pd.to_datetime(serp_clubs['queryTime'])
print(serp_clubs.shape)
serp_clubs.head(2)
code
17134452/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode import advertools as adv import pandas as pd import advertools as adv import pandas as pd pd.options.display.max_columns = None from plotly.tools import make_subplots import plotly.graph_objs as go from plotly.offline import iplot, init_notebook_mode init_notebook_mode() adv.__version__ column_key = pd.read_csv('../input/column_key.csv') column_key clubs = pd.read_csv('../input/clubs.csv') serp_clubs = pd.read_csv('../input/serp_clubs.csv') serp_clubs['totalResults'] = serp_clubs['totalResults'].astype('int') serp_clubs['queryTime'] = pd.to_datetime(serp_clubs['queryTime']) serp_clubs.drop_duplicates(['searchTerms']).groupby('searchTerms', as_index=False).agg({'totalResults': 'sum'}).sort_values('totalResults', ascending=False).reset_index(drop=True).head(15).style.format({'totalResults': '{:,}'})
code
17134452/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode
import advertools as adv
import pandas as pd
import advertools as adv
import pandas as pd
pd.options.display.max_columns = None
from plotly.tools import make_subplots
import plotly.graph_objs as go
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
adv.__version__
code
17134452/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode import advertools as adv import pandas as pd import advertools as adv import pandas as pd pd.options.display.max_columns = None from plotly.tools import make_subplots import plotly.graph_objs as go from plotly.offline import iplot, init_notebook_mode init_notebook_mode() adv.__version__ column_key = pd.read_csv('../input/column_key.csv') column_key clubs = pd.read_csv('../input/clubs.csv') top_countries = clubs.groupby('Country').agg({'Total': 'sum'}).sort_values('Total', ascending=False).reset_index().head(10) top_countries clubs.groupby(['Country']).agg({'Club': 'count', 'Total': 'sum'}).sort_values('Club', ascending=False).reset_index().head(9).set_axis(['country', 'num_clubs', 'total_wins'], axis=1, inplace=False).assign(wins_per_club=lambda df: df['total_wins'].div(df['num_clubs'])).style.background_gradient(high=0.2) clubs_list = clubs['Club'].str.lower().tolist() clubs_list[:10]
code
17134452/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode import advertools as adv import pandas as pd import advertools as adv import pandas as pd pd.options.display.max_columns = None from plotly.tools import make_subplots import plotly.graph_objs as go from plotly.offline import iplot, init_notebook_mode init_notebook_mode() adv.__version__ column_key = pd.read_csv('../input/column_key.csv') column_key clubs = pd.read_csv('../input/clubs.csv') serp_clubs = pd.read_csv('../input/serp_clubs.csv') serp_clubs['totalResults'] = serp_clubs['totalResults'].astype('int') serp_clubs['queryTime'] = pd.to_datetime(serp_clubs['queryTime']) adv.word_frequency(serp_clubs['snippet'].fillna(''), phrase_len=2, rm_words=adv.stopwords['english'].union(['-', '|', ' ', '', 'de'])).head(20)
code
17134452/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
lang_football = {'en': 'football', 'fr': 'football', 'de': 'fußball', 'es': 'fútbol', 'it': 'calcio', 'pt-BR': 'futebol', 'nl': 'voetbal'}
lang_football
code
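Later cells pair each club name with the language-specific "football" keyword above to form searchTerms (the country is later recovered by stripping those keywords back out). A hedged illustration of how such query strings could be assembled; clubs_sample and the exact pairing are assumptions for illustration, not the notebook's verbatim code:

# Hypothetical sketch: combine club names with the language-specific 'football' keyword.
clubs_sample = ['real madrid', 'milan', 'bayern münchen']   # assumed example clubs
queries = [f'{club} {word}' for club in clubs_sample for word in lang_football.values()]
queries[:5]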
17134452/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode import advertools as adv import pandas as pd import advertools as adv import pandas as pd pd.options.display.max_columns = None from plotly.tools import make_subplots import plotly.graph_objs as go from plotly.offline import iplot, init_notebook_mode init_notebook_mode() adv.__version__ column_key = pd.read_csv('../input/column_key.csv') column_key clubs = pd.read_csv('../input/clubs.csv') serp_clubs = pd.read_csv('../input/serp_clubs.csv') serp_clubs['totalResults'] = serp_clubs['totalResults'].astype('int') serp_clubs['queryTime'] = pd.to_datetime(serp_clubs['queryTime']) (serp_clubs['snippet'].isna().sum(), serp_clubs['title'].isna().sum())
code
17134452/cell_16
[ "text_plain_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode import advertools as adv import pandas as pd import advertools as adv import pandas as pd pd.options.display.max_columns = None from plotly.tools import make_subplots import plotly.graph_objs as go from plotly.offline import iplot, init_notebook_mode init_notebook_mode() adv.__version__ column_key = pd.read_csv('../input/column_key.csv') column_key clubs = pd.read_csv('../input/clubs.csv') serp_clubs = pd.read_csv('../input/serp_clubs.csv') serp_clubs['totalResults'] = serp_clubs['totalResults'].astype('int') serp_clubs['queryTime'] = pd.to_datetime(serp_clubs['queryTime']) adv.word_frequency(serp_clubs['snippet'].fillna(''), rm_words=adv.stopwords['english'].union(['-', '|', ' ', '·', '', 'de'])).head(15)
code
17134452/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode
import advertools as adv
import pandas as pd
import advertools as adv
import pandas as pd
pd.options.display.max_columns = None
from plotly.tools import make_subplots
import plotly.graph_objs as go
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
adv.__version__
column_key = pd.read_csv('../input/column_key.csv')
column_key
code
17134452/cell_17
[ "text_plain_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode import advertools as adv import pandas as pd import advertools as adv import pandas as pd pd.options.display.max_columns = None from plotly.tools import make_subplots import plotly.graph_objs as go from plotly.offline import iplot, init_notebook_mode init_notebook_mode() adv.__version__ column_key = pd.read_csv('../input/column_key.csv') column_key clubs = pd.read_csv('../input/clubs.csv') serp_clubs = pd.read_csv('../input/serp_clubs.csv') serp_clubs['totalResults'] = serp_clubs['totalResults'].astype('int') serp_clubs['queryTime'] = pd.to_datetime(serp_clubs['queryTime']) adv.word_frequency(serp_clubs[serp_clubs['hl'] == 'en']['snippet'].fillna(''), rm_words=adv.stopwords['english'].union(['-', '|', ' ', '·', '', 'de'])).head(15)
code
17134452/cell_24
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode from plotly.tools import make_subplots import advertools as adv import pandas as pd import plotly.graph_objs as go import advertools as adv import pandas as pd pd.options.display.max_columns = None from plotly.tools import make_subplots import plotly.graph_objs as go from plotly.offline import iplot, init_notebook_mode init_notebook_mode() adv.__version__ column_key = pd.read_csv('../input/column_key.csv') column_key clubs = pd.read_csv('../input/clubs.csv') top_countries = clubs.groupby('Country').agg({'Total': 'sum'}).sort_values('Total', ascending=False).reset_index().head(10) top_countries clubs.groupby(['Country']).agg({'Club': 'count', 'Total': 'sum'}).sort_values('Club', ascending=False).reset_index().head(9).set_axis(['country', 'num_clubs', 'total_wins'], axis=1, inplace=False).assign(wins_per_club=lambda df: df['total_wins'].div(df['num_clubs'])).style.background_gradient(high=0.2) lang_football = {'en': 'football', 'fr': 'football', 'de': 'fußball', 'es': 'fútbol', 'it': 'calcio', 'pt-BR': 'futebol', 'nl': 'voetbal'} lang_football serp_clubs = pd.read_csv('../input/serp_clubs.csv') serp_clubs['totalResults'] = serp_clubs['totalResults'].astype('int') serp_clubs['queryTime'] = pd.to_datetime(serp_clubs['queryTime']) club_country = {club.lower(): country.lower() for club, country in zip(clubs['Club'], clubs['Country'])} football_multi = '|'.join([' ' + football for football in lang_football.values()]) serp_clubs['country'] = [club_country[club].title() for club in serp_clubs['searchTerms'].str.replace(football_multi, '')] serp_clubs[['searchTerms', 'country']].sample(10) serp_clubs.drop_duplicates(['searchTerms']).groupby('searchTerms', as_index=False).agg({'totalResults': 'sum'}).sort_values('totalResults', ascending=False).reset_index(drop=True).head(15).style.format({'totalResults': '{:,}'}) hl_domain_appearances = serp_clubs.groupby(['hl', 'displayLink']).agg({'rank': 'count'}).reset_index().sort_values(['hl', 'rank'], ascending=False).rename(columns={'rank': 'search_appearances'}) hl_domain_appearances.groupby(['hl']).head(5) fig = make_subplots(1, 7, print_grid=False, shared_yaxes=True) for i, lang in enumerate(serp_clubs['hl'].unique()[:7]): df = serp_clubs[serp_clubs['hl']==lang] fig.append_trace(go.Bar(y=df['displayLink'].value_counts().values[:8], x=df['displayLink'].value_counts().index.str.replace('www.', '')[:8], name=lang, orientation='v'), row=1, col=i+1) fig.layout.margin = {'b': 150, 'r': 30} fig.layout.legend.orientation = 'h' fig.layout.legend.y = -0.5 fig.layout.legend.x = 0.15 fig.layout.title = 'Top Domains by Language of Search' fig.layout.yaxis.title = 'Number of Appearances on SERPs' fig.layout.plot_bgcolor = '#eeeeee' fig.layout.paper_bgcolor = '#eeeeee' iplot(fig) fig = make_subplots(1, 7, shared_yaxes=True, print_grid=False) for i, country in enumerate(serp_clubs['country'].unique()[:7]): if country in top_countries['Country'][:7].values: df = serp_clubs[serp_clubs['country']==country] fig.append_trace(go.Bar(y=df['displayLink'].value_counts().values[:8], x=df['displayLink'].value_counts().index.str.replace('www.', '')[:8], name=country, orientation='v'), row=1, col=i+1) fig.layout.margin = {'b': 150, 'r': 0} fig.layout.legend.orientation = 'h' fig.layout.legend.y = -0.5 fig.layout.legend.x = 0.15 fig.layout.title = 'Top Domains by Country of Club' fig.layout.yaxis.title = 'Number of Appearances on SERPs' fig.layout.plot_bgcolor = '#eeeeee' fig.layout.paper_bgcolor = '#eeeeee' iplot(fig) def 
plot_serps(df, opacity=0.1, num_domains=10, width=None, height=700): """ df: a DataFrame resulting from running advertools.serp_goog opacity: the opacity of the markers [0, 1] num_domains: how many domains to plot """ top_domains = df['displayLink'].value_counts()[:num_domains].index.tolist() top_df = df[df['displayLink'].isin(top_domains)] top_df_counts_means = top_df.groupby('displayLink', as_index=False).agg({'rank': ['count', 'mean']}).set_axis(['displayLink', 'rank_count', 'rank_mean'], axis=1, inplace=False) top_df = pd.merge(top_df, top_df_counts_means).sort_values(['rank_count', 'rank_mean'], ascending=[False, True]) rank_counts = top_df.groupby(['displayLink', 'rank']).agg({'rank': ['count']}).reset_index().set_axis(['displayLink', 'rank', 'count'], axis=1, inplace=False) num_queries = df['queryTime'].nunique() fig = go.Figure() fig.layout.height = 600 fig.layout.yaxis.autorange = 'reversed' fig.layout.yaxis.zeroline = False for domain in rank_counts['displayLink'].unique(): rank_counts_subset = rank_counts[rank_counts['displayLink'] == domain] fig.layout.title = 'Google Search Results Rankings<br>keyword(s): ' + str(df['queryTime'].nunique()) + ' Football (Soccer) Queries' fig.layout.hovermode = False fig.layout.yaxis.autorange = 'reversed' fig.layout.yaxis.zeroline = False fig.layout.yaxis.tickvals = list(range(1, 14)) fig.layout.yaxis.ticktext = list(range(1, 11)) + ['Total<br>appearances', 'Coverage', 'Avg. Pos.'] fig.layout.height = height fig.layout.width = width fig.layout.yaxis.title = 'SERP Rank (number of appearances)' fig.layout.showlegend = False fig.layout.paper_bgcolor = '#eeeeee' fig.layout.plot_bgcolor = '#eeeeee' plot_serps(serp_clubs[serp_clubs['hl'] == 'es'], opacity=0.1)
code
17134452/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode import advertools as adv import pandas as pd import advertools as adv import pandas as pd pd.options.display.max_columns = None from plotly.tools import make_subplots import plotly.graph_objs as go from plotly.offline import iplot, init_notebook_mode init_notebook_mode() adv.__version__ column_key = pd.read_csv('../input/column_key.csv') column_key clubs = pd.read_csv('../input/clubs.csv') serp_clubs = pd.read_csv('../input/serp_clubs.csv') serp_clubs['totalResults'] = serp_clubs['totalResults'].astype('int') serp_clubs['queryTime'] = pd.to_datetime(serp_clubs['queryTime']) adv.word_frequency(serp_clubs['title'], rm_words=adv.stopwords['english'].union(['-', '|', ' ', ''])).head(10)
code
17134452/cell_22
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode from plotly.tools import make_subplots import advertools as adv import pandas as pd import plotly.graph_objs as go import advertools as adv import pandas as pd pd.options.display.max_columns = None from plotly.tools import make_subplots import plotly.graph_objs as go from plotly.offline import iplot, init_notebook_mode init_notebook_mode() adv.__version__ column_key = pd.read_csv('../input/column_key.csv') column_key clubs = pd.read_csv('../input/clubs.csv') top_countries = clubs.groupby('Country').agg({'Total': 'sum'}).sort_values('Total', ascending=False).reset_index().head(10) top_countries clubs.groupby(['Country']).agg({'Club': 'count', 'Total': 'sum'}).sort_values('Club', ascending=False).reset_index().head(9).set_axis(['country', 'num_clubs', 'total_wins'], axis=1, inplace=False).assign(wins_per_club=lambda df: df['total_wins'].div(df['num_clubs'])).style.background_gradient(high=0.2) lang_football = {'en': 'football', 'fr': 'football', 'de': 'fußball', 'es': 'fútbol', 'it': 'calcio', 'pt-BR': 'futebol', 'nl': 'voetbal'} lang_football serp_clubs = pd.read_csv('../input/serp_clubs.csv') serp_clubs['totalResults'] = serp_clubs['totalResults'].astype('int') serp_clubs['queryTime'] = pd.to_datetime(serp_clubs['queryTime']) club_country = {club.lower(): country.lower() for club, country in zip(clubs['Club'], clubs['Country'])} football_multi = '|'.join([' ' + football for football in lang_football.values()]) serp_clubs['country'] = [club_country[club].title() for club in serp_clubs['searchTerms'].str.replace(football_multi, '')] serp_clubs[['searchTerms', 'country']].sample(10) serp_clubs.drop_duplicates(['searchTerms']).groupby('searchTerms', as_index=False).agg({'totalResults': 'sum'}).sort_values('totalResults', ascending=False).reset_index(drop=True).head(15).style.format({'totalResults': '{:,}'}) hl_domain_appearances = serp_clubs.groupby(['hl', 'displayLink']).agg({'rank': 'count'}).reset_index().sort_values(['hl', 'rank'], ascending=False).rename(columns={'rank': 'search_appearances'}) hl_domain_appearances.groupby(['hl']).head(5) fig = make_subplots(1, 7, print_grid=False, shared_yaxes=True) for i, lang in enumerate(serp_clubs['hl'].unique()[:7]): df = serp_clubs[serp_clubs['hl']==lang] fig.append_trace(go.Bar(y=df['displayLink'].value_counts().values[:8], x=df['displayLink'].value_counts().index.str.replace('www.', '')[:8], name=lang, orientation='v'), row=1, col=i+1) fig.layout.margin = {'b': 150, 'r': 30} fig.layout.legend.orientation = 'h' fig.layout.legend.y = -0.5 fig.layout.legend.x = 0.15 fig.layout.title = 'Top Domains by Language of Search' fig.layout.yaxis.title = 'Number of Appearances on SERPs' fig.layout.plot_bgcolor = '#eeeeee' fig.layout.paper_bgcolor = '#eeeeee' iplot(fig) fig = make_subplots(1, 7, shared_yaxes=True, print_grid=False) for i, country in enumerate(serp_clubs['country'].unique()[:7]): if country in top_countries['Country'][:7].values: df = serp_clubs[serp_clubs['country'] == country] fig.append_trace(go.Bar(y=df['displayLink'].value_counts().values[:8], x=df['displayLink'].value_counts().index.str.replace('www.', '')[:8], name=country, orientation='v'), row=1, col=i + 1) fig.layout.margin = {'b': 150, 'r': 0} fig.layout.legend.orientation = 'h' fig.layout.legend.y = -0.5 fig.layout.legend.x = 0.15 fig.layout.title = 'Top Domains by Country of Club' fig.layout.yaxis.title = 'Number of Appearances on SERPs' fig.layout.plot_bgcolor = '#eeeeee' fig.layout.paper_bgcolor = '#eeeeee' iplot(fig)
code
17134452/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode import advertools as adv import pandas as pd import advertools as adv import pandas as pd pd.options.display.max_columns = None from plotly.tools import make_subplots import plotly.graph_objs as go from plotly.offline import iplot, init_notebook_mode init_notebook_mode() adv.__version__ column_key = pd.read_csv('../input/column_key.csv') column_key clubs = pd.read_csv('../input/clubs.csv') top_countries = clubs.groupby('Country').agg({'Total': 'sum'}).sort_values('Total', ascending=False).reset_index().head(10) top_countries clubs.groupby(['Country']).agg({'Club': 'count', 'Total': 'sum'}).sort_values('Club', ascending=False).reset_index().head(9).set_axis(['country', 'num_clubs', 'total_wins'], axis=1, inplace=False).assign(wins_per_club=lambda df: df['total_wins'].div(df['num_clubs'])).style.background_gradient(high=0.2) lang_football = {'en': 'football', 'fr': 'football', 'de': 'fußball', 'es': 'fútbol', 'it': 'calcio', 'pt-BR': 'futebol', 'nl': 'voetbal'} lang_football serp_clubs = pd.read_csv('../input/serp_clubs.csv') serp_clubs['totalResults'] = serp_clubs['totalResults'].astype('int') serp_clubs['queryTime'] = pd.to_datetime(serp_clubs['queryTime']) club_country = {club.lower(): country.lower() for club, country in zip(clubs['Club'], clubs['Country'])} football_multi = '|'.join([' ' + football for football in lang_football.values()]) serp_clubs['country'] = [club_country[club].title() for club in serp_clubs['searchTerms'].str.replace(football_multi, '')] serp_clubs[['searchTerms', 'country']].sample(10)
code
17134452/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode
import advertools as adv
import pandas as pd
import advertools as adv
import pandas as pd
pd.options.display.max_columns = None
from plotly.tools import make_subplots
import plotly.graph_objs as go
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
adv.__version__
column_key = pd.read_csv('../input/column_key.csv')
column_key
clubs = pd.read_csv('../input/clubs.csv')
top_countries = clubs.groupby('Country').agg({'Total': 'sum'}).sort_values('Total', ascending=False).reset_index().head(10)
top_countries
code
128044650/cell_30
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
!pip install opentsne from openTSNE import TSNE if str_data_inf != 'Medulloblastoma Intergrated GSE124814': # Fail in that case by unclear reason from openTSNE import TSNE reducer = TSNE( perplexity=30, metric="euclidean", n_jobs=8, random_state=42, verbose=True, ) r = reducer.fit(X) for (i,j) in [(0,1)]:#,(0,2),(1,2),(2,3),(2,4),(3,4)]: fig = plt.figure(figsize = (20,12) ); c = 0 ax = sns.scatterplot(x = r[:,i],y=r[:,j], hue = v4color, palette = palette1 , marker = marker1 , alpha = alpha1 ) if 1: plt.setp(ax.get_legend().get_texts(), fontsize=12) # for legend text plt.setp(ax.get_legend().get_title(), fontsize=12) # for legend title plt.title(' openTSNE '+ str_data_inf + ' n_samples='+str(len(r)), fontsize = 20) plt.show()
code
128044650/cell_44
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_7.png", "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
import os
import time
import time
import time
t0start = time.time()
import numpy as np
import pandas as pd
import os
print('%.1f seconds passed total ' % (time.time() - t0start))
code
128044650/cell_20
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png", "image_output_1.png" ]
!pip install trimap import trimap reducer = trimap.TRIMAP() r = reducer.fit_transform(X) for (i,j) in [(0,1)]:#,(0,2),(1,2),(2,3),(2,4),(3,4)]: fig = plt.figure(figsize = (20,12) ); c = 0 ax = sns.scatterplot(x = r[:,i],y=r[:,j], hue = v4color, palette = palette1, marker = marker1 , alpha = alpha1 ) if 1: plt.setp(ax.get_legend().get_texts(), fontsize=20) # for legend text plt.setp(ax.get_legend().get_title(), fontsize=20) # for legend title plt.title(' trimap ' + str_data_inf + ' n_samples='+str(len(r)), fontsize = 20) plt.show()
code
128044650/cell_40
[ "text_plain_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn.manifold import Isomap fig = plt.figure(figsize=(25, 8)) plt.suptitle(str_data_inf + ' n_samples=' + str(len(X)), fontsize=20) c = 0 for n_neighbors in [5, 10]: reducer = Isomap(n_components=2, n_neighbors=n_neighbors) t0 = time.time() try: if isinstance(X, pd.DataFrame): r = reducer.fit_transform(X.iloc[:N, :]) else: r = reducer.fit_transform(X[:N, :]) except Exception as e: print(e) print('Got Exception', n_neighbors) continue t1 = time.time() print('Isomap n_neighbors %s: %.2g sec' % (n_neighbors, t1 - t0)) c += 1 fig.add_subplot(1, 2, c) if isinstance(v4color, pd.Series): sns.scatterplot(x=r[:, 0], y=r[:, 1], hue=v4color.iloc[:N], palette=palette1, marker=marker1, alpha=alpha1) else: sns.scatterplot(x=r[:, 0], y=r[:, 1], hue=v4color[:N], palette=palette1, marker=marker1, alpha=alpha1) plt.title('Isomap n_neighbors' + str(n_neighbors), fontsize=20) if c == 1: plt.setp(ax.get_legend().get_texts(), fontsize=12) plt.setp(ax.get_legend().get_title(), fontsize=12) plt.legend(bbox_to_anchor=(1.15, 1.0), loc='upper right') else: plt.legend('') plt.show()
code
128044650/cell_26
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import umap reducer = umap.UMAP(densmap=True, random_state=42) r2 = reducer.fit_transform(X) print(X.shape) for i, j in [(0, 1)]: plt.figure(figsize=(20, 10)) ax = sns.scatterplot(x=r2[:, i], y=r2[:, j], hue=v4color, palette=palette1, marker=marker1, alpha=alpha1) if 1: plt.setp(ax.get_legend().get_texts(), fontsize=20) plt.setp(ax.get_legend().get_title(), fontsize=20) plt.title('UMAP densmap On ' + str_data_inf + ' n_samples=' + str(len(X)), fontsize=20) plt.xlabel('UMAP' + str(i + 1), fontsize=12) plt.ylabel('UMAP' + str(j + 1), fontsize=12) plt.show()
code
128044650/cell_2
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import os
import time
import time
t0start = time.time()
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        if 'png' not in filename:
            print(os.path.join(dirname, filename))
code
128044650/cell_18
[ "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
!pip install pacmap import pacmap reducer = pacmap.PaCMAP() r = reducer.fit_transform(X) for (i,j) in [(0,1)]:#,(0,2),(1,2),(2,3),(2,4),(3,4)]: fig = plt.figure(figsize = (20,12) ); c = 0 ax = sns.scatterplot(x = r[:,i],y=r[:,j], hue = v4color, palette = palette1, marker = marker1 , alpha = alpha1 ) if 1: plt.setp(ax.get_legend().get_texts(), fontsize=20) # for legend text plt.setp(ax.get_legend().get_title(), fontsize=20) # for legend title plt.title(' pacmap ' + str_data_inf + ' n_samples='+str(len(r)), fontsize = 20) plt.show()
code
128044650/cell_32
[ "text_plain_output_1.png", "image_output_1.png" ]
import time import umap from sklearn import manifold from sklearn.decomposition import PCA from sklearn.decomposition import FactorAnalysis from sklearn.decomposition import NMF from sklearn.decomposition import FastICA from sklearn.decomposition import FactorAnalysis from sklearn.decomposition import LatentDirichletAllocation from sklearn.ensemble import RandomTreesEmbedding from sklearn.random_projection import SparseRandomProjection from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.pipeline import make_pipeline from sklearn.decomposition import TruncatedSVD from collections import OrderedDict from functools import partial from matplotlib.ticker import NullFormatter n_neighbors = 10 n_components = 2 LLE = partial(manifold.LocallyLinearEmbedding, n_neighbors, n_components, eigen_solver='auto') methods = OrderedDict() methods['PCA'] = PCA() methods['umap'] = umap.UMAP(n_components=n_components) methods['t-SNE'] = manifold.TSNE(n_components=n_components, init='pca', random_state=0) methods['ICA'] = FastICA(n_components=n_components, random_state=0) methods['FA'] = FactorAnalysis(n_components=n_components, random_state=0) methods['MDS'] = manifold.MDS(n_components, max_iter=100, n_init=1) methods['SE'] = manifold.SpectralEmbedding(n_components=n_components, n_neighbors=n_neighbors) methods['NMF'] = NMF(n_components=n_components, init='random', random_state=0) methods['RandProj'] = SparseRandomProjection(n_components=n_components, random_state=42) rand_trees_embed = make_pipeline(RandomTreesEmbedding(n_estimators=200, random_state=0, max_depth=5), TruncatedSVD(n_components=n_components)) methods['RandTrees'] = rand_trees_embed methods['LatDirAll'] = LatentDirichletAllocation(n_components=n_components, random_state=0) list_fast_methods = ['FA', 'RandProj', 'RandTrees'] list_slow_methods = ['t-SNE', 'LLE', 'Modified LLE', 'Isomap', 'MDS', 'SE', 'LatDirAll', 'LTSA', 'Hessian LLE'] fig = plt.figure(figsize=(25, 16)) plt.suptitle(str_data_inf + ' n_samples=' + str(len(X)), fontsize=20) c = 0 for i, (label, method) in enumerate(methods.items()): if label not in list_fast_methods: continue t0 = time.time() try: r = method.fit_transform(X) except: print('Got Exception', label) continue t1 = time.time() print('%s: %.2g sec' % (label, t1 - t0)) c += 1 fig.add_subplot(2, 4, c) sns.scatterplot(x=r[:, 0], y=r[:, 1], hue=v4color, palette=palette1, marker=marker1, alpha=alpha1) plt.title(label, fontsize=20) if c == 1: plt.setp(ax.get_legend().get_texts(), fontsize=12) plt.setp(ax.get_legend().get_title(), fontsize=12) plt.legend(bbox_to_anchor=(1.15, 1.0), loc='upper right') else: plt.legend('') plt.show()
code
128044650/cell_28
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
fig = plt.figure(figsize=(20, 16)) c = 0 cc = 0 plt.suptitle('UMAP densmap ' + str_data_inf + ' n_samples=' + str(len(X)), fontsize=20) for min_dist in [0.1, 0.9]: for n_neighbors in [5, 15, 100]: c += 1 fig.add_subplot(2, 3, c) str_inf = 'n_neighbors=' + str(n_neighbors) + ' min_dist=' + str(min_dist) reducer = umap.UMAP(densmap=True, n_neighbors=n_neighbors, min_dist=min_dist, n_components=2) r2 = reducer.fit_transform(X) i, j = (0, 1) ax = sns.scatterplot(x=r2[:, i], y=r2[:, j], hue=v4color, palette=palette1, marker=marker1, alpha=alpha1) if 1: plt.setp(ax.get_legend().get_texts(), fontsize=12) plt.setp(ax.get_legend().get_title(), fontsize=12) plt.title(str_inf, fontsize=20) plt.xlabel('UMAP' + str(i + 1), fontsize=20) plt.ylabel('UMAP' + str(j + 1), fontsize=20) if c == 1: plt.setp(ax.get_legend().get_texts(), fontsize=10) plt.setp(ax.get_legend().get_title(), fontsize=10) plt.legend(bbox_to_anchor=(1.15, 1.0), loc='upper right') else: plt.legend('') plt.show()
code
128044650/cell_8
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA

pca = PCA(n_components=6)
r = pca.fit_transform(X)
print(np.sum(pca.explained_variance_ratio_))
code
128044650/cell_16
[ "image_output_5.png", "image_output_4.png", "image_output_6.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
!pip install ncvis import ncvis reducer = ncvis.NCVis() r = reducer.fit_transform(X) for (i,j) in [(0,1)]:#,(0,2),(1,2),(2,3),(2,4),(3,4)]: fig = plt.figure(figsize = (20,12) ); c = 0 ax = sns.scatterplot(x = r[:,i],y=r[:,j], hue = v4color, palette = palette1, marker = marker1 , alpha = alpha1 ) if 1: plt.setp(ax.get_legend().get_texts(), fontsize=20) # for legend text plt.setp(ax.get_legend().get_title(), fontsize=20) # for legend title plt.title(' ncvis ' + str_data_inf + ' n_samples='+str(len(r)) , fontsize = 20) plt.show()
code
128044650/cell_38
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.manifold import LocallyLinearEmbedding for n_neighbors in [5, 10]: print('n_neighbors', n_neighbors) fig = plt.figure(figsize=(25, 16)) plt.suptitle(str_data_inf + ' n_samples=' + str(len(X)), fontsize=20) c = 0 for method in ['standard', 'hessian', 'modified', 'ltsa']: if method == 'hessian': reducer = LocallyLinearEmbedding(n_components=2, n_neighbors=6, method=method) else: reducer = LocallyLinearEmbedding(n_components=2, n_neighbors=n_neighbors, method=method) t0 = time.time() try: if isinstance(X, pd.DataFrame): r = reducer.fit_transform(X.iloc[:N, :]) else: r = reducer.fit_transform(X[:N, :]) except Exception as e: print(e) print('Got Exception', method) continue t1 = time.time() print('LLE %s: %.2g sec' % (method, t1 - t0)) c += 1 fig.add_subplot(2, 2, c) if isinstance(v4color, pd.Series): sns.scatterplot(x=r[:, 0], y=r[:, 1], hue=v4color.iloc[:N], palette=palette1, marker=marker1, alpha=alpha1) else: sns.scatterplot(x=r[:, 0], y=r[:, 1], hue=v4color[:N], palette=palette1, marker=marker1, alpha=alpha1) plt.title('LLE ' + str(method), fontsize=20) plt.legend('') plt.show()
code
128044650/cell_24
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
fig = plt.figure(figsize=(20, 16)) c = 0 cc = 0 plt.suptitle('UMAP ' + str_data_inf + ' n_samples=' + str(len(X)), fontsize=20) for min_dist in [0.1, 0.9]: for n_neighbors in [5, 15, 100]: c += 1 fig.add_subplot(2, 3, c) str_inf = 'n_neighbors=' + str(n_neighbors) + ' min_dist=' + str(min_dist) reducer = umap.UMAP(n_neighbors=n_neighbors, min_dist=min_dist, n_components=2) r2 = reducer.fit_transform(X) i, j = (0, 1) ax = sns.scatterplot(x=r2[:, i], y=r2[:, j], hue=v4color, palette=palette1, marker=marker1, alpha=alpha1) if 1: plt.setp(ax.get_legend().get_texts(), fontsize=12) plt.setp(ax.get_legend().get_title(), fontsize=12) plt.title(str_inf, fontsize=20) plt.xlabel('UMAP' + str(i + 1), fontsize=20) plt.ylabel('UMAP' + str(j + 1), fontsize=20) if c == 1: plt.setp(ax.get_legend().get_texts(), fontsize=10) plt.setp(ax.get_legend().get_title(), fontsize=10) plt.legend(bbox_to_anchor=(1.15, 1.0), loc='upper right') else: plt.legend('') plt.show()
code
128044650/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
import umap

r = umap.UMAP().fit_transform(X)
print(r.shape)
for i, j in [(0, 1)]:
    sns.scatterplot(x=r[:, i], y=r[:, j], hue=v4color, palette='rainbow')
    plt.xlabel('UMAP' + str(i + 1), fontsize=20)
    plt.ylabel('UMAP' + str(j + 1), fontsize=20)
    plt.show()
code
128044650/cell_22
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
!pip install MulticoreTSNE from MulticoreTSNE import MulticoreTSNE as TSNE reducer = TSNE(n_jobs=4) r = reducer.fit_transform(X) for (i,j) in [(0,1)]:#,(0,2),(1,2),(2,3),(2,4),(3,4)]: fig = plt.figure(figsize = (20,12) ); c = 0 ax = sns.scatterplot(x = r[:,i],y=r[:,j], hue = v4color, palette = palette1 , marker = marker1 , alpha = alpha1 ) if 1: plt.setp(ax.get_legend().get_texts(), fontsize=20) # for legend text plt.setp(ax.get_legend().get_title(), fontsize=20) # for legend title plt.title(' MulticoreTSNE ' + str_data_inf + ' n_samples='+str(len(r)), fontsize = 20) plt.show()
code
128044650/cell_10
[ "text_plain_output_1.png" ]
n_x_subplots = 2 c = 0 cc = 0 for i, j in [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3), (0, 4), (1, 4), (2, 4), (3, 4), (0, 5)]: cc += 1 if c % n_x_subplots == 0: if c > 0: plt.show() fig = plt.figure(figsize=(20, 5)) c = 0 plt.suptitle(str_data_inf + ' n_samples=' + str(len(r)), fontsize=20) c += 1 fig.add_subplot(1, n_x_subplots, c) ax = sns.scatterplot(x=r[:, i], y=r[:, j], hue=v4color, palette=palette1, marker=marker1, alpha=alpha1) if c == 1: plt.setp(ax.get_legend().get_texts(), fontsize=12) plt.setp(ax.get_legend().get_title(), fontsize=12) plt.legend(bbox_to_anchor=(1.15, 1.0), loc='upper right') else: plt.legend('') plt.title('PCA ' + str(i) + ' ' + str(j), fontsize=20) plt.show()
code
128044650/cell_12
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.decomposition import FastICA reducer = FastICA(n_components=5, random_state=0, whiten='unit-variance') r = reducer.fit_transform(X) n_x_subplots = 2 c = 0 cc = 0 for i, j in [(0, 1), (0, 2), (1, 2), (2, 3), (2, 4), (3, 4)]: if c % n_x_subplots == 0: if c > 0: plt.show() fig = plt.figure(figsize=(20, 5)) c = 0 plt.suptitle(str_data_inf + ' n_samples=' + str(len(r)), fontsize=20) c += 1 fig.add_subplot(1, n_x_subplots, c) ax = sns.scatterplot(x=r[:, i], y=r[:, j], hue=v4color, palette=palette1, marker=marker1, alpha=alpha1) if 1: plt.setp(ax.get_legend().get_texts(), fontsize=12) plt.setp(ax.get_legend().get_title(), fontsize=12) plt.title('ICA ' + str(i) + ' ' + str(j), fontsize=20) if c == 1: plt.setp(ax.get_legend().get_texts(), fontsize=12) plt.setp(ax.get_legend().get_title(), fontsize=12) plt.legend(bbox_to_anchor=(1.15, 1.0), loc='upper right') else: plt.legend('') plt.show()
code
128044650/cell_5
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
features = 'esm2_t33_650M' if features == 'T5': fn = '/kaggle/input/t5embeds/train_embeds.npy' fn4submit = '/kaggle/input/t5embeds/test_embeds.npy' str_data_inf = 'CAFA5 T5 embeddings ' elif features == 'esm2_t33_650M': fn = '/kaggle/input/23468234/train_embeds_esm2_t33_650M_UR50D.npy' fn4submit = '/kaggle/input/23468234/test_embeds_esm2_t33_650M_UR50D.npy' str_data_inf = 'CAFA5 ' + features elif features == 'esm2_t30_150M': fn = '/kaggle/input/8947923/train_embeds_esm2_t30_150M_UR50D.npy' fn4submit = '/kaggle/input/8947923/test_embeds_esm2_t30_150M_UR50D.npy' str_data_inf = 'CAFA5 ' + features elif features == 'esm2_t12_35M': fn = '/kaggle/input/3023750/train_embeds_esm2_t12_35M_UR50D.npy' fn4submit = '/kaggle/input/3023750/test_embeds_esm2_t12_35M_UR50D.npy' str_data_inf = 'CAFA5 ' + features elif features == 'esm2_t6_8M': fn = '/kaggle/input/315701375/train_embeds_esm2_t6_8M_UR50D.npy' fn4submit = '/kaggle/input/315701375/test_embeds_esm2_t6_8M_UR50D.npy' str_data_inf = 'CAFA5 ' + features elif features == 'SGTv12': fn = '/kaggle/input/cafa5-sgt-protein-embeddings/run12_fit_on_15000x2/sgt_embendings_train_15000_15000.csv' fn4submit = '/kaggle/input/cafa5-sgt-protein-embeddings/run12_fit_on_15000x2/sgt_embendings_test_15000_15000.csv' str_data_inf = 'CAFA5 SGT embeddings v12 ' elif features == 'Leven5000': fn = '/kaggle/input/cafa5-levenshtein-distance-features-big/run9_achnor_set5000/df_Levenshtein_distance_5000_features_train.csv' fn4submit = '/kaggle/input/cafa5-levenshtein-distance-features-big/run9_achnor_set5000/df_Levenshtein_distance_5000_features_test.csv' str_data_inf = 'CAFA5 Levenshtein distance features anchor 5000 ' elif features == 'Leven3000': fn = '/kaggle/input/cafa5-levenshtein-distance-features-big/run8_anchor_set_3000/df_Levenshtein_distance_3000_features_train.csv' fn4submit = '/kaggle/input/cafa5-levenshtein-distance-features-big/run8_anchor_set_3000/df_Levenshtein_distance_3000_features_test.csv' str_data_inf = 'CAFA5 Levenshtein distance features anchor 3000 ' elif features == 'Leven1000': fn = '/kaggle/input/cafa5-features-etc/df_Levenshtein_distance_1000_features_train.csv' fn4submit = '/kaggle/input/cafa5-features-etc/df_Levenshtein_distance_1000_features_test.csv' str_data_inf = 'CAFA5 Levenshtein distance features anchor 1000 ' elif features == 'Leven100': fn = '/kaggle/input/cafa5-features-etc/df_Levenshtein_distance_100_features_train.csv' fn4submit = '/kaggle/input/cafa5-features-etc/df_Levenshtein_distance_100_features_test.csv' str_data_inf = 'CAFA5 Levenshtein distance features anchor 100 ' def load_features(fn): print(fn) if '.csv' in fn: df = pd.read_csv(fn, index_col=0) X = df.values elif '.npy' in fn: X = np.load(fn) print(X.shape) return X X = load_features(fn) str_data_inf += ' train' print(X.shape) X[:10, :10]
code
128044650/cell_36
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import umap from sklearn import manifold from sklearn.decomposition import PCA from sklearn.decomposition import FactorAnalysis from sklearn.decomposition import NMF from sklearn.decomposition import FastICA from sklearn.decomposition import FactorAnalysis from sklearn.decomposition import LatentDirichletAllocation from sklearn.ensemble import RandomTreesEmbedding from sklearn.random_projection import SparseRandomProjection from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.pipeline import make_pipeline from sklearn.decomposition import TruncatedSVD from collections import OrderedDict from functools import partial from matplotlib.ticker import NullFormatter n_neighbors = 10 n_components = 2 LLE = partial(manifold.LocallyLinearEmbedding, n_neighbors, n_components, eigen_solver='auto') methods = OrderedDict() methods['PCA'] = PCA() methods['umap'] = umap.UMAP(n_components=n_components) methods['t-SNE'] = manifold.TSNE(n_components=n_components, init='pca', random_state=0) methods['ICA'] = FastICA(n_components=n_components, random_state=0) methods['FA'] = FactorAnalysis(n_components=n_components, random_state=0) methods['MDS'] = manifold.MDS(n_components, max_iter=100, n_init=1) methods['SE'] = manifold.SpectralEmbedding(n_components=n_components, n_neighbors=n_neighbors) methods['NMF'] = NMF(n_components=n_components, init='random', random_state=0) methods['RandProj'] = SparseRandomProjection(n_components=n_components, random_state=42) rand_trees_embed = make_pipeline(RandomTreesEmbedding(n_estimators=200, random_state=0, max_depth=5), TruncatedSVD(n_components=n_components)) methods['RandTrees'] = rand_trees_embed methods['LatDirAll'] = LatentDirichletAllocation(n_components=n_components, random_state=0) list_fast_methods = ['PCA', 'umap', 'FA', 'NMF', 'RandProj', 'RandTrees'] list_slow_methods = ['t-SNE', 'LLE', 'Modified LLE', 'Isomap', 'MDS', 'SE', 'LatDirAll', 'LTSA', 'Hessian LLE'] fig = plt.figure(figsize=(25, 16)) plt.suptitle(str_data_inf + ' n_samples=' + str(len(X)), fontsize=20) c = 0 for i, (label, method) in enumerate(methods.items()): if label not in list_slow_methods: continue t0 = time.time() try: if isinstance(X, pd.DataFrame): r = method.fit_transform(X.iloc[:N, :]) else: r = method.fit_transform(X[:N, :]) except: print('Got Exception', label) continue t1 = time.time() print('%s: %.2g sec' % (label, t1 - t0)) c += 1 fig.add_subplot(2, 3, c) if isinstance(v4color, pd.Series): sns.scatterplot(x=r[:, 0], y=r[:, 1], hue=v4color.iloc[:N], palette=palette1, marker=marker1, alpha=alpha1) else: sns.scatterplot(x=r[:, 0], y=r[:, 1], hue=v4color[:N], palette=palette1, marker=marker1, alpha=alpha1) plt.title(label, fontsize=20) if c == 1: plt.setp(ax.get_legend().get_texts(), fontsize=12) plt.setp(ax.get_legend().get_title(), fontsize=12) plt.legend(bbox_to_anchor=(1.15, 1.0), loc='upper right') else: plt.legend('') plt.show()
code
105199203/cell_4
[ "text_plain_output_1.png" ]
n1 = int(input('Enter your number 1'))
n2 = int(input('Enter your number 2'))
n3 = int(input('Enter your number 3'))
maximum = n1
if maximum < n2:
    maximum = n2
if maximum < n3:
    maximum = n3
# The original if/elif chain used strict comparisons, so equal inputs printed nothing;
# comparing each value against the computed maximum fixes that (and 'maximun' is corrected).
if n1 == maximum:
    print(n1, 'Is the maximum value')
elif n2 == maximum:
    print(n2, 'Is the maximum value')
else:
    print(n3, 'Is the maximum value')
code
105199203/cell_3
[ "text_plain_output_1.png" ]
n1 = int(input('Enter your number 1'))
n2 = int(input('Enter your number 2'))
n3 = int(input('Enter your number 3'))
maximum = n1  # renamed from 'max' to avoid shadowing the built-in max()
if maximum < n2:
    maximum = n2
if maximum < n3:
    maximum = n3
print(maximum, 'Is the maximum number')
code
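The two cells above find the largest of three inputs by hand. As a side note that is not part of the original notebook, Python's built-in max() collapses the same logic to one call:

n1, n2, n3 = 4, 9, 9
print(max(n1, n2, n3), 'Is the maximum value')  # the built-in handles ties as well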
122245369/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122245369/cell_7
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import torchvision
import torchvision.transforms as transforms

train_transformer = transforms.Compose([transforms.RandomCrop(32, padding=5),
                                         transforms.RandomHorizontalFlip(p=0.5),
                                         transforms.RandomRotation(degrees=20),
                                         transforms.ToTensor()])
test_transformer = transforms.Compose([transforms.ToTensor()])
train_dataset = torchvision.datasets.CIFAR10(root='.', train=True, transform=train_transformer, download=True)
test_dataset = torchvision.datasets.CIFAR10(root='.', train=False, transform=test_transformer, download=True)
code
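The augmented CIFAR-10 datasets above would typically be wrapped in DataLoaders before training. A minimal sketch; the batch size and worker count are assumptions, not values from the original notebook:

from torch.utils.data import DataLoader

# Hypothetical loader settings; adjust batch_size/num_workers to the actual setup.
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True, num_workers=2)
test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False, num_workers=2)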
122246401/cell_13
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator, load_img
load_img('../input/rice-image-dataset/Rice_Image_Dataset/Basmati/basmati (10009).jpg')
code
122246401/cell_9
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator,load_img from tensorflow import keras from tensorflow.keras.layers import Dense,Flatten from tensorflow.keras.models import Sequential train_gen = ImageDataGenerator(rescale=1 / 255.0, validation_split=0.3) train_data = train_gen.flow_from_directory('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/', target_size=(64, 64), batch_size=1, class_mode='categorical', shuffle=False, subset='training') test_data = train_gen.flow_from_directory('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/', target_size=(64, 64), batch_size=1, class_mode='categorical', shuffle=False, subset='validation') train_data.class_indices from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Flatten model = Sequential() model.add(Flatten(input_shape=(64, 64, 3))) model.add(Dense(40, activation='sigmoid')) model.add(Dense(5, activation='relu')) model.add(Dense(5, activation='softmax')) model.summary() model.compile(loss='categorical_crossentropy', optimizer='adam', metrics='accuracy') model.fit(train_data, validation_data=test_data, epochs=2) model = keras.models.load_model('../input/rice-classification/cnn_model.h5') model.summary()
code
122246401/cell_4
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator, load_img

train_gen = ImageDataGenerator(rescale=1 / 255.0, validation_split=0.3)
train_data = train_gen.flow_from_directory('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/', target_size=(64, 64), batch_size=1, class_mode='categorical', shuffle=False, subset='training')
test_data = train_gen.flow_from_directory('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/', target_size=(64, 64), batch_size=1, class_mode='categorical', shuffle=False, subset='validation')
code
122246401/cell_6
[ "image_output_1.png" ]
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten

model = Sequential()
model.add(Flatten(input_shape=(64, 64, 3)))
model.add(Dense(40, activation='sigmoid'))
model.add(Dense(5, activation='relu'))
model.add(Dense(5, activation='softmax'))
model.summary()
code
122246401/cell_2
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator, load_img
load_img('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/Basmati/basmati (6626).jpg', target_size=(180, 180))
code
122246401/cell_11
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator,load_img from tensorflow import keras from tensorflow.keras.layers import Dense,Flatten from tensorflow.keras.models import Sequential train_gen = ImageDataGenerator(rescale=1 / 255.0, validation_split=0.3) train_data = train_gen.flow_from_directory('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/', target_size=(64, 64), batch_size=1, class_mode='categorical', shuffle=False, subset='training') test_data = train_gen.flow_from_directory('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/', target_size=(64, 64), batch_size=1, class_mode='categorical', shuffle=False, subset='validation') train_data.class_indices from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Flatten model = Sequential() model.add(Flatten(input_shape=(64, 64, 3))) model.add(Dense(40, activation='sigmoid')) model.add(Dense(5, activation='relu')) model.add(Dense(5, activation='softmax')) model.summary() model.compile(loss='categorical_crossentropy', optimizer='adam', metrics='accuracy') model.fit(train_data, validation_data=test_data, epochs=2) model = keras.models.load_model('../input/rice-classification/cnn_model.h5') model.summary() model.evaluate(test_data) predict1 = model.predict(test_data).argmax(axis=1) predict1
code
122246401/cell_8
[ "image_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator,load_img from tensorflow.keras.layers import Dense,Flatten from tensorflow.keras.models import Sequential train_gen = ImageDataGenerator(rescale=1 / 255.0, validation_split=0.3) train_data = train_gen.flow_from_directory('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/', target_size=(64, 64), batch_size=1, class_mode='categorical', shuffle=False, subset='training') test_data = train_gen.flow_from_directory('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/', target_size=(64, 64), batch_size=1, class_mode='categorical', shuffle=False, subset='validation') train_data.class_indices from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Flatten model = Sequential() model.add(Flatten(input_shape=(64, 64, 3))) model.add(Dense(40, activation='sigmoid')) model.add(Dense(5, activation='relu')) model.add(Dense(5, activation='softmax')) model.summary() model.compile(loss='categorical_crossentropy', optimizer='adam', metrics='accuracy') model.fit(train_data, validation_data=test_data, epochs=2)
code
122246401/cell_15
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator,load_img from tensorflow import keras from tensorflow import keras,lite from tensorflow.keras.layers import Dense,Flatten from tensorflow.keras.models import Sequential import cv2 import numpy as np train_gen = ImageDataGenerator(rescale=1 / 255.0, validation_split=0.3) train_data = train_gen.flow_from_directory('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/', target_size=(64, 64), batch_size=1, class_mode='categorical', shuffle=False, subset='training') test_data = train_gen.flow_from_directory('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/', target_size=(64, 64), batch_size=1, class_mode='categorical', shuffle=False, subset='validation') train_data.class_indices from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Flatten model = Sequential() model.add(Flatten(input_shape=(64, 64, 3))) model.add(Dense(40, activation='sigmoid')) model.add(Dense(5, activation='relu')) model.add(Dense(5, activation='softmax')) model.summary() model.compile(loss='categorical_crossentropy', optimizer='adam', metrics='accuracy') model.fit(train_data, validation_data=test_data, epochs=2) model = keras.models.load_model('../input/rice-classification/cnn_model.h5') model.summary() model.evaluate(test_data) predict1 = model.predict(test_data).argmax(axis=1) predict1 import cv2 import numpy as np image = cv2.imread('../input/rice-img-test/jas3.jpg') image = cv2.resize(image, (64, 64)) image = image / 255 image = image.reshape(-1, 64, 64, 3) np.round(model.predict(image)).argmax(axis=1) from tensorflow import keras, lite converter = lite.TFLiteConverter.from_keras_model(model) tfmodel = converter.convert() open('linear.tflite', 'wb').write(tfmodel)
code
122246401/cell_3
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator, load_img

train_gen = ImageDataGenerator(rescale=1 / 255.0, validation_split=0.3)
train_data = train_gen.flow_from_directory('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/', target_size=(64, 64), batch_size=1, class_mode='categorical', shuffle=False, subset='training')
code
122246401/cell_14
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator, load_img
load_img('../input/rice-image-dataset/Rice_Image_Dataset/Jasmine/Jasmine (10004).jpg')
code
122246401/cell_10
[ "image_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator,load_img from tensorflow import keras from tensorflow.keras.layers import Dense,Flatten from tensorflow.keras.models import Sequential train_gen = ImageDataGenerator(rescale=1 / 255.0, validation_split=0.3) train_data = train_gen.flow_from_directory('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/', target_size=(64, 64), batch_size=1, class_mode='categorical', shuffle=False, subset='training') test_data = train_gen.flow_from_directory('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/', target_size=(64, 64), batch_size=1, class_mode='categorical', shuffle=False, subset='validation') train_data.class_indices from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Flatten model = Sequential() model.add(Flatten(input_shape=(64, 64, 3))) model.add(Dense(40, activation='sigmoid')) model.add(Dense(5, activation='relu')) model.add(Dense(5, activation='softmax')) model.summary() model.compile(loss='categorical_crossentropy', optimizer='adam', metrics='accuracy') model.fit(train_data, validation_data=test_data, epochs=2) model = keras.models.load_model('../input/rice-classification/cnn_model.h5') model.summary() model.evaluate(test_data)
code
122246401/cell_12
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator,load_img from tensorflow import keras from tensorflow.keras.layers import Dense,Flatten from tensorflow.keras.models import Sequential import cv2 import numpy as np train_gen = ImageDataGenerator(rescale=1 / 255.0, validation_split=0.3) train_data = train_gen.flow_from_directory('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/', target_size=(64, 64), batch_size=1, class_mode='categorical', shuffle=False, subset='training') test_data = train_gen.flow_from_directory('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/', target_size=(64, 64), batch_size=1, class_mode='categorical', shuffle=False, subset='validation') train_data.class_indices from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Flatten model = Sequential() model.add(Flatten(input_shape=(64, 64, 3))) model.add(Dense(40, activation='sigmoid')) model.add(Dense(5, activation='relu')) model.add(Dense(5, activation='softmax')) model.summary() model.compile(loss='categorical_crossentropy', optimizer='adam', metrics='accuracy') model.fit(train_data, validation_data=test_data, epochs=2) model = keras.models.load_model('../input/rice-classification/cnn_model.h5') model.summary() model.evaluate(test_data) predict1 = model.predict(test_data).argmax(axis=1) predict1 import cv2 import numpy as np image = cv2.imread('../input/rice-img-test/jas3.jpg') image = cv2.resize(image, (64, 64)) image = image / 255 image = image.reshape(-1, 64, 64, 3) np.round(model.predict(image)).argmax(axis=1)
code
122246401/cell_5
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator, load_img

train_gen = ImageDataGenerator(rescale=1 / 255.0, validation_split=0.3)
train_data = train_gen.flow_from_directory('/kaggle/input/rice-image-dataset/Rice_Image_Dataset/', target_size=(64, 64), batch_size=1, class_mode='categorical', shuffle=False, subset='training')
train_data.class_indices
code
16116674/cell_63
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/insurance.csv') df.describe().T df.shape df.isna().sum() df.duplicated().sum() df = df.drop_duplicates() df.duplicated().sum() df.sex.unique() df.region.unique() df.smoker.value_counts() df.smoker.replace({'no': 0, 'yes': 1}, inplace=True) df.corr() df_categorical_col = df.select_dtypes(exclude=np.number).columns df_categorical_col df_numeric_col = df.select_dtypes(include=np.number).columns df_numeric_col model = LinearRegression() model.fit(train_x, train_y) print('Predicting train data') train_predict = model.predict(train_x) print('Predicting test data') test_predict = model.predict(test_x) print(' ') print('MAE') print('Train data: ', mean_absolute_error(train_y, train_predict)) print('Test data: ', mean_absolute_error(test_y, test_predict)) print(' ') print('MSE') print('Train data: ', mean_squared_error(train_y, train_predict)) print('Test data: ', mean_squared_error(test_y, test_predict)) print(' ') print('RMSE') print('Train data: ', np.sqrt(mean_squared_error(train_y, train_predict))) print('Test data: ', np.sqrt(mean_squared_error(test_y, test_predict))) print(' ') print('R^2') print('Train data: ', r2_score(train_y, train_predict)) print('Test data: ', r2_score(test_y, test_predict))
code
16116674/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df.sex.unique()
df.region.unique()
df.smoker.value_counts()
code
16116674/cell_25
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df.sex.unique()
df.region.unique()
df.smoker.value_counts()
df.smoker.replace({'no': 0, 'yes': 1}, inplace=True)
df
code
16116674/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/insurance.csv')
df.head()
code
16116674/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df.sex.unique()
df.region.unique()
df.smoker.value_counts()
df.smoker.replace({'no': 0, 'yes': 1}, inplace=True)
df.corr()
sns.pairplot(data=df)
code
16116674/cell_44
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df.sex.unique()
df.region.unique()
df.smoker.value_counts()
df.smoker.replace({'no': 0, 'yes': 1}, inplace=True)
df.corr()
df_categorical_col = df.select_dtypes(exclude=np.number).columns
df_categorical_col
df_numeric_col = df.select_dtypes(include=np.number).columns
df_numeric_col
code
16116674/cell_40
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df.sex.unique()
df.region.unique()
df.smoker.value_counts()
df.smoker.replace({'no': 0, 'yes': 1}, inplace=True)
df.corr()
sns.swarmplot(x=df['smoker'], y=df['expenses'])
code
16116674/cell_39
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df.sex.unique()
df.region.unique()
df.smoker.value_counts()
df.smoker.replace({'no': 0, 'yes': 1}, inplace=True)
df.corr()
sns.lmplot(x='bmi', y='expenses', hue='smoker', data=df)
code
16116674/cell_60
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_x, train_y)
print(model.coef_)
code
16116674/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df.sex.unique()
df.region.unique()
code
16116674/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
16116674/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
code
16116674/cell_49
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df.sex.unique()
df.region.unique()
df.smoker.value_counts()
df.smoker.replace({'no': 0, 'yes': 1}, inplace=True)
df.corr()
df_categorical_col = df.select_dtypes(exclude=np.number).columns
df_categorical_col
df_numeric_col = df.select_dtypes(include=np.number).columns
df_numeric_col
df_onehot = pd.get_dummies(df[df_categorical_col])
df_after_encoding = pd.concat([df[df_numeric_col], df_onehot], axis=1)
df_after_encoding
code
16116674/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df.sex.unique()
df.region.unique()
df.smoker.value_counts()
df.smoker.replace({'no': 0, 'yes': 1}, inplace=True)
df.corr()
sns.regplot(x=df['age'], y=df['expenses'])
code
16116674/cell_59
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_x, train_y)
print(model.intercept_)
code
16116674/cell_58
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_x, train_y)
code
16116674/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.info()
code
16116674/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df
code
16116674/cell_47
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df.sex.unique()
df.region.unique()
df.smoker.value_counts()
df.smoker.replace({'no': 0, 'yes': 1}, inplace=True)
df.corr()
df_categorical_col = df.select_dtypes(exclude=np.number).columns
df_categorical_col
df_numeric_col = df.select_dtypes(include=np.number).columns
df_numeric_col
df_onehot = pd.get_dummies(df[df_categorical_col])
df_onehot
code
16116674/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df.sex.unique()
code
16116674/cell_35
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df.sex.unique()
df.region.unique()
df.smoker.value_counts()
df.smoker.replace({'no': 0, 'yes': 1}, inplace=True)
df.corr()
sns.scatterplot(x=df['bmi'], y=df['expenses'], hue=df['smoker'])
code
16116674/cell_43
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df.sex.unique()
df.region.unique()
df.smoker.value_counts()
df.smoker.replace({'no': 0, 'yes': 1}, inplace=True)
df.corr()
df_categorical_col = df.select_dtypes(exclude=np.number).columns
df_categorical_col
code
16116674/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
code
16116674/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
code
16116674/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df.sex.unique()
df.region.unique()
df.smoker.value_counts()
df.smoker.replace({'no': 0, 'yes': 1}, inplace=True)
df.corr()
code
16116674/cell_37
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df.sex.unique()
df.region.unique()
df.smoker.value_counts()
df.smoker.replace({'no': 0, 'yes': 1}, inplace=True)
df.corr()
sns.scatterplot(x=df['age'], y=df['expenses'], hue=df['smoker'])
code
16116674/cell_12
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance.csv')
df.describe().T
df.shape
df.isna().sum()
df.duplicated().sum()
code
16116674/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/insurance.csv')
df.describe().T
code
33121549/cell_13
[ "text_html_output_1.png" ]
from tensorflow import keras
from tensorflow.keras import layers
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import tensorflow_docs as tfdocs
import tensorflow_docs.modeling  # assumed missing import: tfdocs.modeling.EpochDots() below needs the submodule loaded
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
tf.debugging.set_log_device_placement(False)
dataset = pd.read_csv('/kaggle/input/pubg-finish-placement-prediction/train_V2.csv')[['damageDealt', 'headshotKills', 'killPlace', 'boosts', 'heals', 'winPlacePerc']].dropna()
train_dataset = dataset.sample(frac=0.9, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
train_stats = train_dataset.describe()
train_stats.pop("winPlacePerc")
train_stats = train_stats.transpose()
train_stats
train_labels = train_dataset.pop('winPlacePerc')
test_labels = test_dataset.pop('winPlacePerc')

def norm(x):
    return (x - train_stats['mean']) / train_stats['std']

normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)

def build_model():
    model = keras.Sequential([layers.Dense(64, activation='relu', input_shape=(len(train_dataset.keys()),)), layers.Dense(64, activation='relu'), layers.Dense(1)])
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
    model.compile(loss='mae', optimizer=optimizer, metrics=['mae', 'mse'])
    return model

model = build_model()
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
EPOCHS = 2
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS, validation_split=0.2, verbose=1, batch_size=300, callbacks=[early_stop, tfdocs.modeling.EpochDots()])
code
33121549/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33121549/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('/kaggle/input/pubg-finish-placement-prediction/train_V2.csv')[['damageDealt', 'headshotKills', 'killPlace', 'boosts', 'heals', 'winPlacePerc']].dropna()
train_dataset = dataset.sample(frac=0.9, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
train_stats = train_dataset.describe()
train_stats.pop('winPlacePerc')
train_stats = train_stats.transpose()
train_stats
code
33121549/cell_15
[ "text_plain_output_1.png" ]
""" test_predictions = model.predict(normed_test_data).flatten() a = plt.axes(aspect='equal') plt.scatter(test_labels, test_predictions) plt.xlabel('True Values') plt.ylabel('Predictions') lims = [0, 1] plt.xlim(lims) plt.ylim(lims) _ = plt.plot(lims, lims) """
code
33121549/cell_16
[ "text_plain_output_1.png" ]
""" error = test_predictions - test_labels plt.hist(error, bins = 25) plt.xlabel("Prediction Error") _ = plt.ylabel("Count") """
code
34141750/cell_6
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34141750/cell_11
[ "text_plain_output_1.png" ]
# List /kaggle/input to see the data files and folders linked to this Notebook
!ls /kaggle/input
code
34141750/cell_14
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/dataisbeautiful/r_dataisbeautiful_posts.csv')
pd.read_csv('/kaggle/temp/temp.csv')
code
34141750/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/dataisbeautiful/r_dataisbeautiful_posts.csv')
data.head(3)
code
129034049/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
from bark import SAMPLE_RATE, generate_audio, preload_models
from bark import SAMPLE_RATE, generate_audio, preload_models
from IPython.display import Audio
preload_models()
code
129034049/cell_2
[ "text_plain_output_1.png" ]
# install bark as well as pytorch nightly to get blazing fast flash-attention
!pip install git+https://github.com/suno-ai/bark.git && \
pip uninstall -y torch torchvision torchaudio && \
pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu118
code
128018796/cell_2
[ "text_plain_output_4.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
!pip install xmltodict
code
128018796/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from pathlib import Path
from tqdm import tqdm
from tsfresh import extract_features
import datetime
import numpy as np
import pandas as pd
import re
import xmltodict
import datetime
import json
import re
from os import listdir
from os.path import isfile, join
from pathlib import Path
import catboost
import numpy as np
import optuna
import pandas as pd
import shap
import xmltodict
from boruta import BorutaPy
from dateutil.relativedelta import relativedelta
from sklearn.ensemble import RandomForestRegressor
from tqdm import tqdm
pd.options.mode.chained_assignment = None
DATA_ROOT = Path('/kaggle/input/kaggle-pog-series-s01e04/')
DF_TRAIN = DATA_ROOT / 'train.csv'
DF_TEST = DATA_ROOT / 'test.csv'
RAW_DATA = DATA_ROOT / 'raw_health_export.xml'
OUTPUT_PATH = '/kaggle/working/'
TSFRESH_PATH = '/kaggle/input/datasets-sleep/'
train = pd.read_csv(DF_TRAIN)
test = pd.read_csv(DF_TEST)

class FeatureExtractor:

    def __init__(self, data_path):
        raw_dict = FeatureExtractor.parse_raw_data(data_path)
        self.records = FeatureExtractor.get_records_df(raw_dict)

    @staticmethod
    def parse_raw_data(data_path):
        with open(RAW_DATA) as file:
            raw_dict = xmltodict.parse(file.read())
        return raw_dict

    @staticmethod
    def get_records_df(raw_dict):
        records = pd.DataFrame(raw_dict['HealthData']['Record'])
        records.columns = [col.replace('@', '') for col in records.columns]
        records['creationDate'] = pd.to_datetime(records['creationDate']).dt.tz_localize(None)
        records['startDate'] = pd.to_datetime(records['startDate']).dt.tz_localize(None)
        records['endDate'] = pd.to_datetime(records['endDate']).dt.tz_localize(None)
        records['type'] = records['type'].str.replace('HKQuantityTypeIdentifier', '')
        return records

    @staticmethod
    def get_type_ts(records, type):
        if type != 'BoostedHeartRate':
            records['is_apple'] = records['sourceName'].str.contains('Apple')
            type_records = records.query(f"type == '{type}' and is_apple")
            type_records = type_records.drop(['type'], axis=1)
            type_records['value'] = type_records['value'].astype(float)
        else:
            hrv = records.query('HeartRateVariabilityMetadataList == HeartRateVariabilityMetadataList')
            extract_list = lambda x: x['InstantaneousBeatsPerMinute']
            hrv['HeartRateVariabilityMetadataList'] = hrv['HeartRateVariabilityMetadataList'].apply(extract_list)
            hrv_exp = hrv.explode('HeartRateVariabilityMetadataList')
            get_bpm = lambda x: x['@bpm']
            get_time = lambda x: x['@time']
            hrv_exp['bpm'] = hrv_exp['HeartRateVariabilityMetadataList'].apply(get_bpm)
            hrv_exp['time'] = hrv_exp['HeartRateVariabilityMetadataList'].apply(get_time)
            hrv_exp['time_date'] = pd.to_datetime(hrv_exp['time'], format='%I:%M:%S.%f %p')
            hrv_exp.loc[hrv_exp['time_date'].dt.hour == 0, 'date'] = hrv_exp[hrv_exp['time_date'].dt.hour == 0]['endDate'].dt.date
            hrv_exp.loc[hrv_exp['time_date'].dt.hour > 0, 'date'] = hrv_exp[hrv_exp['time_date'].dt.hour > 0]['startDate'].dt.date
            hrv_exp['hours'] = hrv_exp['time_date'].astype('datetime64[s]').dt.time
            hrv_exp['new_startDate'] = pd.to_datetime(hrv_exp['date'].astype(str) + ' ' + hrv_exp['hours'].astype(str))
            hrv_exp = hrv_exp.drop(['startDate', 'value'], axis=1)
            hrv_exp = hrv_exp.rename(columns={'new_startDate': 'startDate', 'bpm': 'value'})
            hrv_exp = hrv_exp[['startDate', 'endDate', 'value']]
            hrv_exp['value'] = hrv_exp['value'].astype(float)
            hrv_exp.reset_index(inplace=True, drop=True)
            type = 'HeartRate'
            records['is_apple'] = records['sourceName'].str.contains('Apple')
            type_records = records.query(f"type == '{type}' and is_apple")
            type_records = type_records.drop(['type'], axis=1)
            type_records['value'] = type_records['value'].astype(float)
            type_records = type_records[['startDate', 'endDate', 'value']]
            type_records = pd.concat([type_records, hrv_exp], ignore_index=True)
        type_records = type_records[['startDate', 'endDate', 'value']]
        return type_records

    @staticmethod
    def resample_ts(records, resample='5Min', how='mean'):
        records = records.sort_values('startDate')
        records = records.set_index('startDate')
        records = eval(f"records.resample('{resample}').{how}()")
        records = records.fillna(method='pad')
        records = records.reset_index()
        return records

    @staticmethod
    def trim_outliers(records):
        save_records = records.copy()
        Q1 = np.percentile(records['value'], 25, interpolation='midpoint')
        Q3 = np.percentile(records['value'], 75, interpolation='midpoint')
        IQR = Q3 - Q1
        upper = Q3 + 1.5 * IQR
        lower = Q1 - 1.5 * IQR
        records.loc[records['value'] > upper, 'value'] = upper
        records.loc[records['value'] < lower, 'value'] = lower
        if records['value'].sum() == 0:
            return save_records
        return records

    @staticmethod
    def tsfresh(records, prefix):
        from tsfresh import extract_features
        records = records.sort_values('startDate')
        records['timestep'] = records.groupby(['date']).cumcount() + 1
        extracted_features = extract_features(records[['date', 'timestep', 'value']], column_id='date', column_sort='timestep')
        extracted_features = extracted_features.reset_index()
        extracted_features = extracted_features.rename(columns={'index': 'date'})
        extracted_features.columns = ['date'] + [prefix + '_' + str(col) for col in extracted_features.columns[1:]]
        extracted_features = extracted_features.rename(columns=lambda x: re.sub('[^A-Za-z0-9_]+', '', x))
        return extracted_features

    @staticmethod
    def add_date(records, how):
        if how == '24h':
            records['date'] = (records['startDate'] - datetime.timedelta(hours=12)).dt.date
        elif how == '7days':
            records['date'] = records['startDate'].dt.date
            df_result = records[0:0]
            dates = records['date'].unique()
            for d in dates:
                min_d = d - datetime.timedelta(days=7)
                max_d = d
                df_date = records.query('date >= @min_d and date <= @max_d')
                df_date['date'] = d
                df_result = pd.concat([df_result, df_date])
            records = df_result
        elif how == '3daysAfter':
            records['date'] = records['startDate'].dt.date
            df_result = records[0:0]
            dates = records['date'].unique()
            for d in dates:
                min_d = d
                max_d = d + datetime.timedelta(days=3)
                df_date = records.query('date >= @min_d and date <= @max_d')
                df_date['date'] = d
                df_result = pd.concat([df_result, df_date])
            records = df_result
        return records

aggregation_info = {'BoostedHeartRate': 'mean', 'HeartRate': 'mean', 'ActiveEnergyBurned': 'sum', 'StepCount': 'sum', 'BasalEnergyBurned': 'mean', 'AppleStandTime': 'mean', 'FlightsClimbed': 'sum', 'EnvironmentalAudioExposure': 'mean', 'RespiratoryRate': 'mean', 'AppleExerciseTime': 'sum', 'DistanceWalkingRunning': 'sum', 'RespiratoryRate': 'mean', 'HeadphoneAudioExposure': 'mean', 'StairDescentSpeed': 'mean', 'OxygenSaturation': 'mean', 'StairAscentSpeed': 'mean', 'HeartRateVariabilitySDNN': 'mean'}
aggregation_info = {}
for type in tqdm(aggregation_info.keys()):
    print(type)
    ts_type = FeatureExtractor.get_type_ts(fe.records, type)
    prefix = '24h_' + type
    ts_resample = FeatureExtractor.resample_ts(ts_type, resample='5Min', how=aggregation_info[type])
    ts_resample = FeatureExtractor.trim_outliers(ts_resample)
    print(ts_resample.describe())
    ts_resample = FeatureExtractor.add_date(ts_resample, how='24h')
    ts_resample = FeatureExtractor.tsfresh(ts_resample, prefix)
    ts_resample.to_csv(f'{OUTPUT_PATH}/tsfresh_{prefix}.csv', index=False)
    prefix = '7d_' + type
    ts_resample = FeatureExtractor.resample_ts(ts_type, resample='60min', how=aggregation_info[type])
    print(ts_resample.describe())
    ts_resample = FeatureExtractor.trim_outliers(ts_resample)
    ts_resample = FeatureExtractor.add_date(ts_resample, how='7days')
    ts_resample = FeatureExtractor.tsfresh(ts_resample, prefix)
    ts_resample.to_csv(f'{OUTPUT_PATH}/tsfresh_{prefix}.csv', index=False)
    prefix = '24h_1m_' + type
    ts_resample = FeatureExtractor.resample_ts(ts_type, resample='1Min', how='first')
    ts_resample = FeatureExtractor.trim_outliers(ts_resample)
    print(ts_resample.describe())
    ts_resample = FeatureExtractor.add_date(ts_resample, how='24h')
    ts_resample = FeatureExtractor.tsfresh(ts_resample, prefix)
    ts_resample.to_csv(f'{OUTPUT_PATH}/tsfresh_{prefix}.csv', index=False)
    prefix = '24h_1m_10r_' + type
    ts_resample = FeatureExtractor.resample_ts(ts_type, resample='1Min', how='first')
    ts_resample['value'] = ts_resample['value'].rolling(window=10).var()
    ts_resample['value'] = ts_resample['value'].fillna(0)
    ts_resample = FeatureExtractor.trim_outliers(ts_resample)
    print(ts_resample.describe())
    ts_resample = FeatureExtractor.add_date(ts_resample, how='24h')
    ts_resample = FeatureExtractor.tsfresh(ts_resample, prefix)
    ts_resample.to_csv(f'{OUTPUT_PATH}/tsfresh_{prefix}.csv', index=False)
code
128018796/cell_14
[ "text_plain_output_1.png" ]
from boruta import BorutaPy
from os import listdir
from os.path import isfile, join
from pathlib import Path
from sklearn.ensemble import RandomForestRegressor
from tqdm import tqdm
from tsfresh import extract_features
import datetime
import json
import numpy as np
import pandas as pd
import re
import shap
import xmltodict
import datetime
import json
import re
from os import listdir
from os.path import isfile, join
from pathlib import Path
import catboost
import numpy as np
import optuna
import pandas as pd
import shap
import xmltodict
from boruta import BorutaPy
from dateutil.relativedelta import relativedelta
from sklearn.ensemble import RandomForestRegressor
from tqdm import tqdm
pd.options.mode.chained_assignment = None
DATA_ROOT = Path('/kaggle/input/kaggle-pog-series-s01e04/')
DF_TRAIN = DATA_ROOT / 'train.csv'
DF_TEST = DATA_ROOT / 'test.csv'
RAW_DATA = DATA_ROOT / 'raw_health_export.xml'
OUTPUT_PATH = '/kaggle/working/'
TSFRESH_PATH = '/kaggle/input/datasets-sleep/'
train = pd.read_csv(DF_TRAIN)
test = pd.read_csv(DF_TEST)

class FeatureExtractor:

    def __init__(self, data_path):
        raw_dict = FeatureExtractor.parse_raw_data(data_path)
        self.records = FeatureExtractor.get_records_df(raw_dict)

    @staticmethod
    def parse_raw_data(data_path):
        with open(RAW_DATA) as file:
            raw_dict = xmltodict.parse(file.read())
        return raw_dict

    @staticmethod
    def get_records_df(raw_dict):
        records = pd.DataFrame(raw_dict['HealthData']['Record'])
        records.columns = [col.replace('@', '') for col in records.columns]
        records['creationDate'] = pd.to_datetime(records['creationDate']).dt.tz_localize(None)
        records['startDate'] = pd.to_datetime(records['startDate']).dt.tz_localize(None)
        records['endDate'] = pd.to_datetime(records['endDate']).dt.tz_localize(None)
        records['type'] = records['type'].str.replace('HKQuantityTypeIdentifier', '')
        return records

    @staticmethod
    def get_type_ts(records, type):
        if type != 'BoostedHeartRate':
            records['is_apple'] = records['sourceName'].str.contains('Apple')
            type_records = records.query(f"type == '{type}' and is_apple")
            type_records = type_records.drop(['type'], axis=1)
            type_records['value'] = type_records['value'].astype(float)
        else:
            hrv = records.query('HeartRateVariabilityMetadataList == HeartRateVariabilityMetadataList')
            extract_list = lambda x: x['InstantaneousBeatsPerMinute']
            hrv['HeartRateVariabilityMetadataList'] = hrv['HeartRateVariabilityMetadataList'].apply(extract_list)
            hrv_exp = hrv.explode('HeartRateVariabilityMetadataList')
            get_bpm = lambda x: x['@bpm']
            get_time = lambda x: x['@time']
            hrv_exp['bpm'] = hrv_exp['HeartRateVariabilityMetadataList'].apply(get_bpm)
            hrv_exp['time'] = hrv_exp['HeartRateVariabilityMetadataList'].apply(get_time)
            hrv_exp['time_date'] = pd.to_datetime(hrv_exp['time'], format='%I:%M:%S.%f %p')
            hrv_exp.loc[hrv_exp['time_date'].dt.hour == 0, 'date'] = hrv_exp[hrv_exp['time_date'].dt.hour == 0]['endDate'].dt.date
            hrv_exp.loc[hrv_exp['time_date'].dt.hour > 0, 'date'] = hrv_exp[hrv_exp['time_date'].dt.hour > 0]['startDate'].dt.date
            hrv_exp['hours'] = hrv_exp['time_date'].astype('datetime64[s]').dt.time
            hrv_exp['new_startDate'] = pd.to_datetime(hrv_exp['date'].astype(str) + ' ' + hrv_exp['hours'].astype(str))
            hrv_exp = hrv_exp.drop(['startDate', 'value'], axis=1)
            hrv_exp = hrv_exp.rename(columns={'new_startDate': 'startDate', 'bpm': 'value'})
            hrv_exp = hrv_exp[['startDate', 'endDate', 'value']]
            hrv_exp['value'] = hrv_exp['value'].astype(float)
            hrv_exp.reset_index(inplace=True, drop=True)
            type = 'HeartRate'
            records['is_apple'] = records['sourceName'].str.contains('Apple')
            type_records = records.query(f"type == '{type}' and is_apple")
            type_records = type_records.drop(['type'], axis=1)
            type_records['value'] = type_records['value'].astype(float)
            type_records = type_records[['startDate', 'endDate', 'value']]
            type_records = pd.concat([type_records, hrv_exp], ignore_index=True)
        type_records = type_records[['startDate', 'endDate', 'value']]
        return type_records

    @staticmethod
    def resample_ts(records, resample='5Min', how='mean'):
        records = records.sort_values('startDate')
        records = records.set_index('startDate')
        records = eval(f"records.resample('{resample}').{how}()")
        records = records.fillna(method='pad')
        records = records.reset_index()
        return records

    @staticmethod
    def trim_outliers(records):
        save_records = records.copy()
        Q1 = np.percentile(records['value'], 25, interpolation='midpoint')
        Q3 = np.percentile(records['value'], 75, interpolation='midpoint')
        IQR = Q3 - Q1
        upper = Q3 + 1.5 * IQR
        lower = Q1 - 1.5 * IQR
        records.loc[records['value'] > upper, 'value'] = upper
        records.loc[records['value'] < lower, 'value'] = lower
        if records['value'].sum() == 0:
            return save_records
        return records

    @staticmethod
    def tsfresh(records, prefix):
        from tsfresh import extract_features
        records = records.sort_values('startDate')
        records['timestep'] = records.groupby(['date']).cumcount() + 1
        extracted_features = extract_features(records[['date', 'timestep', 'value']], column_id='date', column_sort='timestep')
        extracted_features = extracted_features.reset_index()
        extracted_features = extracted_features.rename(columns={'index': 'date'})
        extracted_features.columns = ['date'] + [prefix + '_' + str(col) for col in extracted_features.columns[1:]]
        extracted_features = extracted_features.rename(columns=lambda x: re.sub('[^A-Za-z0-9_]+', '', x))
        return extracted_features

    @staticmethod
    def add_date(records, how):
        if how == '24h':
            records['date'] = (records['startDate'] - datetime.timedelta(hours=12)).dt.date
        elif how == '7days':
            records['date'] = records['startDate'].dt.date
            df_result = records[0:0]
            dates = records['date'].unique()
            for d in dates:
                min_d = d - datetime.timedelta(days=7)
                max_d = d
                df_date = records.query('date >= @min_d and date <= @max_d')
                df_date['date'] = d
                df_result = pd.concat([df_result, df_date])
            records = df_result
        elif how == '3daysAfter':
            records['date'] = records['startDate'].dt.date
            df_result = records[0:0]
            dates = records['date'].unique()
            for d in dates:
                min_d = d
                max_d = d + datetime.timedelta(days=3)
                df_date = records.query('date >= @min_d and date <= @max_d')
                df_date['date'] = d
                df_result = pd.concat([df_result, df_date])
            records = df_result
        return records

N_FOLDS = 10
RUN_HBO = False
N_TRIALS = 100
N_FEAT = 100
target = 'sleep_hours'
train = pd.read_csv(DF_TRAIN)
test = pd.read_csv(DF_TEST)

def trim_outliers(records):
    Q1 = np.percentile(records['sleep_hours'], 25, interpolation='midpoint')
    Q3 = np.percentile(records['sleep_hours'], 75, interpolation='midpoint')
    IQR = Q3 - Q1
    upper = Q3 + 1.5 * IQR
    lower = Q1 - 1.5 * IQR
    records.loc[records['sleep_hours'] > upper, 'sleep_hours'] = upper
    records.loc[records['sleep_hours'] < lower, 'sleep_hours'] = lower
    return records

def add_fold(train, N_FOLDS):
    np.random.seed(20951)
    folds = np.random.randint(0, high=N_FOLDS, size=len(train), dtype=int)
    unique, counts = np.unique(folds, return_counts=True)
    train['fold'] = folds
    dict(zip(unique, counts))
    return train

train = add_fold(train, N_FOLDS)
train = train.query("date >= '2020-09-26'").reset_index(drop=True)

def get_feat_boruta(X, y):
    model = RandomForestRegressor(n_estimators=100, max_depth=5, random_state=42)
    feat_selector = BorutaPy(verbose=0, estimator=model, n_estimators='auto', max_iter=20, two_step=False)
    feat_selector.fit(np.array(X), np.array(y))
    feats = []
    for i in range(len(feat_selector.support_)):
        if feat_selector.support_[i]:
            feats.append(X.columns[i])
    return feats

def global_shap_importance(model, X):
    explainer = shap.Explainer(model)
    shap_values = explainer(X)
    cohorts = {'': shap_values}
    cohort_labels = list(cohorts.keys())
    cohort_exps = list(cohorts.values())
    for i in range(len(cohort_exps)):
        if len(cohort_exps[i].shape) == 2:
            cohort_exps[i] = cohort_exps[i].abs.mean(0)
    features = cohort_exps[0].data
    feature_names = cohort_exps[0].feature_names
    values = np.array([cohort_exps[i].values for i in range(len(cohort_exps))])
    feature_importance = pd.DataFrame(list(zip(feature_names, sum(values))), columns=['features', 'importance'])
    feature_importance.sort_values(by=['importance'], ascending=False, inplace=True)
    return feature_importance

def get_feat_shap(X, y, n_feats):
    model = RandomForestRegressor(n_estimators=20, max_depth=5, random_state=42)
    model.fit(X, y)
    explainer = shap.TreeExplainer(model)
    shap_values = explainer.shap_values(X)
    feat_imp = global_shap_importance(model, X)
    top_feats = list(feat_imp['features'])[:n_feats]
    return top_feats

train_ = train.copy()
test_ = test.copy()
datasets_to_include = [f.split('.')[0] for f in listdir(TSFRESH_PATH) if isfile(join(TSFRESH_PATH, f))]
datasets_to_include = [f for f in datasets_to_include if 'tsfresh' in f]
try:
    with open(f'{TSFRESH_PATH}features.json') as json_file:
        selection_checkpoint = json.load(json_file)
except:
    selection_checkpoint = {}
for d in tqdm(datasets_to_include):
    print('Loading:', d)
    d_train = train_.copy()
    d_train = d_train.merge(pd.read_csv(f'{TSFRESH_PATH}{d}.csv'), on='date', how='left', suffixes=('', f'_{d}'))
    d_train = d_train.fillna(0)
    d_train = d_train.replace(np.inf, 0)
    print('Original Features:', len(d_train.columns) - 3)
    X_train = d_train.drop([target, 'date', 'fold'], axis=1)
    y_train = d_train[target].tolist()
    if d in selection_checkpoint.keys():
        selected_feats = selection_checkpoint[d]
    else:
        selected_feats = get_feat_shap(X_train, y_train, N_FEAT)
        X_train = X_train[selected_feats]
        selected_feats = get_feat_boruta(X_train, y_train)
    d_train = d_train[selected_feats]
    d_train['date'] = train_['date']
    print('Selected Features:', len(d_train.columns) - 1)
    train = train.merge(d_train, on='date', how='left')
    train_cols = train.columns.tolist()
    train_cols.remove('fold')
    d_test = test_.copy()
    d_test = d_test.merge(pd.read_csv(f'{TSFRESH_PATH}{d}.csv'), on='date', how='left', suffixes=('', f'_{d}'))
    d_test = d_test.fillna(0)
    d_test = d_test.replace(np.inf, 0)
    d_test = d_test.drop(['sleep_hours'], axis=1)
    test = test.merge(d_test, on='date', how='left')[train_cols]
    break
train['synthetic'] = False
print('All features num:', len(train.columns))
code
33106981/cell_13
[ "text_plain_output_1.png" ]
from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hotel = pd.read_csv('/kaggle/input/hotel-booking-demand/hotel_bookings.csv')
hotel.shape
hotel.head().T
hotel_num = hotel.dtypes[hotel.dtypes != 'object']
hotel_num = hotel_num.index.to_list()
Date_Drop = {'is_canceled', 'company'}
hotel_num = [ele for ele in hotel_num if ele not in Date_Drop]
hotel_num
hot_num = hotel[hotel_num].copy()
from collections import Counter

def detect_outliers(df, features):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than n outliers according
    to the Tukey method.
    """
    outlier_indices = []
    for col in features:
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    return outlier_indices

Outliers_to_drop = detect_outliers(hotel, hot_num)
len(Outliers_to_drop)
hotel = hotel.drop(Outliers_to_drop, axis=0).reset_index(drop=True)
hotel.isna().sum()
code
33106981/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hotel = pd.read_csv('/kaggle/input/hotel-booking-demand/hotel_bookings.csv')
hotel.shape
hotel.head().T
hotel_num = hotel.dtypes[hotel.dtypes != 'object']
hotel_num = hotel_num.index.to_list()
Date_Drop = {'is_canceled', 'company'}
hotel_num = [ele for ele in hotel_num if ele not in Date_Drop]
hotel_num
hot_num = hotel[hotel_num].copy()
hotel_num.remove('arrival_date_year')
hotel_num.remove('arrival_date_week_number')
hotel_num.remove('arrival_date_day_of_month')
hotel_num
code
33106981/cell_23
[ "text_plain_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
hotel = pd.read_csv('/kaggle/input/hotel-booking-demand/hotel_bookings.csv')
hotel.shape
hotel.head().T
hotel_num = hotel.dtypes[hotel.dtypes != 'object']
hotel_num = hotel_num.index.to_list()
Date_Drop = {'is_canceled', 'company'}
hotel_num = [ele for ele in hotel_num if ele not in Date_Drop]
hotel_num
hot_num = hotel[hotel_num].copy()
from collections import Counter

def detect_outliers(df, features):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than n outliers according
    to the Tukey method.
    """
    outlier_indices = []
    for col in features:
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    return outlier_indices

Outliers_to_drop = detect_outliers(hotel, hot_num)
len(Outliers_to_drop)
hotel = hotel.drop(Outliers_to_drop, axis=0).reset_index(drop=True)
hotel.isna().sum()
hotel.company = hotel.company.fillna(0)
hotel.agent = hotel.agent.fillna(0)
hotel.children = hotel.children.fillna(0)
hotel.country = hotel.country.fillna('unknown')
hotel.drop(hotel[(hotel['children'] == 0) & (hotel['babies'] == 0) & (hotel['adults'] == 0)].index, inplace=True)

def cnt_plot(a):
    col = hotel[a]
    title = 'Category wise count of' + ' ' + a

Cat_Var = hotel.dtypes[hotel.dtypes == 'object']
Cat_Var = Cat_Var.index.to_list()
Date_Drop = {'arrival_date_month', 'reservation_status_date'}
Cat_Var = [ele for ele in Cat_Var if ele not in Date_Drop]
Cat_Var
for col in Cat_Var:
    cnt_plot(col)
code
33106981/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hotel = pd.read_csv('/kaggle/input/hotel-booking-demand/hotel_bookings.csv')
hotel.shape
hotel.head().T
hotel.describe(percentiles=[0.25, 0.5, 0.75, 0.9, 0.95, 0.99])
code
33106981/cell_26
[ "text_html_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
hotel = pd.read_csv('/kaggle/input/hotel-booking-demand/hotel_bookings.csv')
hotel.shape
hotel.head().T
hotel_num = hotel.dtypes[hotel.dtypes != 'object']
hotel_num = hotel_num.index.to_list()
Date_Drop = {'is_canceled', 'company'}
hotel_num = [ele for ele in hotel_num if ele not in Date_Drop]
hotel_num
hot_num = hotel[hotel_num].copy()
from collections import Counter

def detect_outliers(df, features):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than n outliers according
    to the Tukey method.
    """
    outlier_indices = []
    for col in features:
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    return outlier_indices

Outliers_to_drop = detect_outliers(hotel, hot_num)
len(Outliers_to_drop)
hotel = hotel.drop(Outliers_to_drop, axis=0).reset_index(drop=True)
hotel.isna().sum()
hotel.company = hotel.company.fillna(0)
hotel.agent = hotel.agent.fillna(0)
hotel.children = hotel.children.fillna(0)
hotel.country = hotel.country.fillna('unknown')
hotel.drop(hotel[(hotel['children'] == 0) & (hotel['babies'] == 0) & (hotel['adults'] == 0)].index, inplace=True)

def cnt_plot(a):
    col = hotel[a]
    title = 'Category wise count of' + ' ' + a

Cat_Var = hotel.dtypes[hotel.dtypes == 'object']
Cat_Var = Cat_Var.index.to_list()
Date_Drop = {'arrival_date_month', 'reservation_status_date'}
Cat_Var = [ele for ele in Cat_Var if ele not in Date_Drop]
Cat_Var
corrmap = hotel.corr()
plt.subplots(figsize=(16, 10))
sns.heatmap(corrmap, annot=True)
plt.show()
code
33106981/cell_11
[ "text_plain_output_1.png" ]
from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
hotel = pd.read_csv('/kaggle/input/hotel-booking-demand/hotel_bookings.csv')
hotel.shape
hotel.head().T
hotel_num = hotel.dtypes[hotel.dtypes != 'object']
hotel_num = hotel_num.index.to_list()
Date_Drop = {'is_canceled', 'company'}
hotel_num = [ele for ele in hotel_num if ele not in Date_Drop]
hotel_num
hot_num = hotel[hotel_num].copy()
from collections import Counter

def detect_outliers(df, features):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than n outliers according
    to the Tukey method.
    """
    outlier_indices = []
    for col in features:
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    return outlier_indices

Outliers_to_drop = detect_outliers(hotel, hot_num)
len(Outliers_to_drop)
code
33106981/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code