Dataset schema (per-record fields, with the viewer's column summaries):

  path              string    (lengths 13 to 17)
  screenshot_names  sequence  (lengths 1 to 873)
  code              string    (lengths 0 to 40.4k)
  cell_type         string    (1 distinct value: "code")
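A minimal loading sketch for records with these four fields, assuming they are stored as JSON Lines; the file name notebook_cells.jsonl and the storage format are assumptions, not something this dump states:

import json

# Read one JSON object per line; expected keys: path, screenshot_names, code, cell_type.
records = []
with open('notebook_cells.jsonl', encoding='utf-8') as f:
    for line in f:
        records.append(json.loads(line))

# Group cells by notebook id (the part of `path` before the slash).
by_notebook = {}
for rec in records:
    nb_id = rec['path'].split('/')[0]
    by_notebook.setdefault(nb_id, []).append(rec)
print({k: len(v) for k, v in by_notebook.items()})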
88087713/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd

articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
trans_data.dtypes
trans_data['t_dat'] = pd.to_datetime(trans_data['t_dat'])  # astype('datetime64') is deprecated in recent pandas
trans_data.dtypes
trans_data['year_trans'] = trans_data['t_dat'].dt.year  # derive the year column used below (missing in the original cell)
sample_trans_data = trans_data[trans_data['year_trans'] == 2019].copy()  # .copy() avoids SettingWithCopyWarning on the in-place edits
sample_trans_data.isna().sum()
sample_trans_data.drop(labels=['t_dat', 'sales_channel_id'], axis=1, inplace=True)
sample_trans_data.reset_index(drop=True, inplace=True)
code
88087713/cell_8
[ "image_output_1.png" ]
import pandas as pd

articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE'].copy()  # .copy() avoids SettingWithCopyWarning
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
code
88087713/cell_15
[ "text_html_output_1.png" ]
import pandas as pd

articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
articles_data[['prod_name', 'product_type_name', 'product_group_name']].describe()
code
88087713/cell_17
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
articles_data_new = articles_data[['prod_name', 'product_type_name', 'product_group_name']].copy()
articles_data_new.head(5)
code
88087713/cell_35
[ "text_plain_output_1.png" ]
import pandas as pd

articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE'].copy()  # .copy() avoids SettingWithCopyWarning
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.drop(labels=['postal_code'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
interval_range_age = pd.interval_range(start=0, freq=10, end=100)  # decade-wide age buckets (0, 10], (10, 20], ...
customers_data_new['age_group'] = pd.cut(customers_data_new['age'], bins=interval_range_age)
customers_data_new.head(5)
code
88087713/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd

articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
trans_data.dtypes
trans_data['t_dat'] = pd.to_datetime(trans_data['t_dat'])
trans_data.dtypes
trans_data['year_trans'] = trans_data['t_dat'].dt.year  # derive the year column used below
sample_trans_data = trans_data[trans_data['year_trans'] == 2019].copy()
sample_trans_data.isna().sum()
sample_trans_data.drop(labels=['t_dat', 'sales_channel_id'], axis=1, inplace=True)
sample_trans_data.reset_index(drop=True, inplace=True)
sample_trans_data.isna().sum()
sample_trans_data.head(5)
code
88087713/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
articles_data.head(5)
code
88087713/cell_22
[ "text_html_output_1.png" ]
import pandas as pd

articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
trans_data.dtypes
trans_data['t_dat'] = pd.to_datetime(trans_data['t_dat'])  # astype('datetime64') is deprecated in recent pandas
trans_data.dtypes
code
88087713/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE'].copy()
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.drop(labels=['postal_code'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.head(5)
code
88087713/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE'].copy()
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.drop(labels=['postal_code'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.info()
code
88087713/cell_36
[ "text_plain_output_1.png" ]
import pandas as pd

articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE'].copy()
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.drop(labels=['postal_code'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
interval_range_age = pd.interval_range(start=0, freq=10, end=100)
customers_data_new['age_group'] = pd.cut(customers_data_new['age'], bins=interval_range_age)
customers_data_new.isna().sum()
code
16154469/cell_9
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
import matplotlib.pylab as plt
import networkx as nx
import numpy as np
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
structures_df = pd.read_csv('../input/structures.csv')
test_df['scalar_coupling_constant'] = np.nan
df = pd.concat([train_df, test_df])
del train_df
del test_df
# Attach the (x, y, z, atom) of both endpoint atoms to every pair row.
for atom_index in [0, 1]:
    renamed_columns = {col: col + '_' + str(atom_index) for col in ['x', 'y', 'z', 'atom_index', 'atom']}
    df = df.merge(structures_df.rename(columns=renamed_columns), on=['molecule_name', 'atom_index_' + str(atom_index)], how='inner')
df['distance_l2'] = ((df['x_0'] - df['x_1']) ** 2 + (df['y_0'] - df['y_1']) ** 2 + (df['z_0'] - df['z_1']) ** 2) ** 0.5
MOLECULE_NAMES = df['molecule_name'].unique()

def get_molecule_graph(df, molecule_name):
    molecule_df = df.loc[lambda df: df['molecule_name'] == molecule_name]
    labels = molecule_df[['atom_1', 'atom_index_1']].set_index('atom_index_1')['atom_1'].to_dict()
    labels.update(molecule_df[['atom_0', 'atom_index_0']].set_index('atom_index_0')['atom_0'].to_dict())
    graph = nx.from_pandas_edgelist(molecule_df, source='atom_index_0', target='atom_index_1',
                                    edge_attr='scalar_coupling_constant', create_using=nx.Graph())
    return graph, labels

def draw_graph(graph, labels, weight='distance_l2'):
    position = nx.spring_layout(graph, weight=weight)
    fig, ax = plt.subplots(1, 1, figsize=(6, 6))
    nx.draw_networkx_nodes(graph, position, node_color='red', alpha=0.8, ax=ax)
    nx.draw_networkx_edges(graph, position, edge_color='blue', alpha=0.6, ax=ax)
    nx.draw_networkx_labels(graph, position, labels, font_size=16, ax=ax)
    return ax

for molecule_name in MOLECULE_NAMES[:10]:
    graph, labels = get_molecule_graph(df, molecule_name)
    ax = draw_graph(graph, labels)
    ax.set_title(f'Graph for {molecule_name}')
code
130014083/cell_42
[ "text_html_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt  # pyplot; the original `import matplotlib as plt` breaks plt.figure()/plt.axis()
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)  # numeric_only avoids a TypeError on string columns in pandas >= 2.0
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
correlation_matrix = df.corr(numeric_only=True)
text_data = df['Departure City'].str.cat(sep=' ')
wordcloud = WordCloud(width=800, height=400, background_color='black').generate(text_data)
plt.axis('off')
text_data = df['Arrival City'].str.cat(sep=' ')
wordcloud = WordCloud(width=800, height=400, background_color='black').generate(text_data)
plt.axis('off')
text_data = df['Name'].str.cat(sep=' ')
wordcloud = WordCloud(width=800, height=400, background_color='black').generate(text_data)
plt.figure(figsize=(10, 6))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show()
code
130014083/cell_21
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
df.describe()
code
130014083/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
filtered_df = df[df['Delay Minutes'] > 60]
filtered_df.head()
code
130014083/cell_25
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
max_value = np.max(df['Ticket Price'])
max_value
min_value = np.min(df['Ticket Price'])
min_value
code
130014083/cell_34
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
correlation_matrix = df.corr(numeric_only=True)
sns.pairplot(df, hue='Frequent Flyer Status')
code
130014083/cell_23
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
df['Ticket Price'].mean()
code
130014083/cell_30
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
correlation_matrix = df.corr(numeric_only=True)
sns.countplot(x='Booking Class', data=df)
code
130014083/cell_33
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
correlation_matrix = df.corr(numeric_only=True)
sns.pairplot(df, hue='Booking Class')
code
130014083/cell_44
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
correlation_matrix = df.corr(numeric_only=True)
X = df[['Flight Duration', 'Ticket Price', 'Competitor Price', 'Demand']]
y = df['Delay Minutes']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
model = LinearRegression()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
for prediction in predictions:
    print(prediction)
code
130014083/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()  # note: the result is not assigned, so df itself keeps its missing values
code
130014083/cell_6
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
df.head()
code
130014083/cell_40
[ "text_plain_output_1.png", "image_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt  # pyplot, not the bare matplotlib module
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
correlation_matrix = df.corr(numeric_only=True)
text_data = df['Departure City'].str.cat(sep=' ')
wordcloud = WordCloud(width=800, height=400, background_color='black').generate(text_data)
plt.figure(figsize=(10, 6))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show()
code
130014083/cell_29
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
correlation_matrix = df.corr(numeric_only=True)
sns.scatterplot(x='Demand', y='Profitability', data=df)
code
130014083/cell_39
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
correlation_matrix = df.corr(numeric_only=True)
df.head()
code
130014083/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
df.head()
code
130014083/cell_41
[ "text_plain_output_1.png", "image_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt  # pyplot, not the bare matplotlib module
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
correlation_matrix = df.corr(numeric_only=True)
text_data = df['Departure City'].str.cat(sep=' ')
wordcloud = WordCloud(width=800, height=400, background_color='black').generate(text_data)
plt.axis('off')
text_data = df['Arrival City'].str.cat(sep=' ')
wordcloud = WordCloud(width=800, height=400, background_color='black').generate(text_data)
plt.figure(figsize=(10, 6))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show()
code
130014083/cell_19
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
Avg_ticket_price.head()
code
130014083/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
130014083/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
df.info()
code
130014083/cell_18
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
Avg_delay_min.head()
code
130014083/cell_32
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
correlation_matrix = df.corr(numeric_only=True)
sns.pairplot(df)
code
130014083/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
correlation_matrix = df.corr(numeric_only=True)
sns.histplot(x='Demand', data=df, bins=10)
code
130014083/cell_16
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
sorted_df.head()
code
130014083/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)  # numeric_only keeps pandas >= 2.0 from raising on string columns
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
code
130014083/cell_35
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
correlation_matrix = df.corr(numeric_only=True)
sns.displot(df['Loyalty Points'], kde=True, bins=10)
code
130014083/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
correlation_matrix = df.corr(numeric_only=True)
sns.boxplot(x='Flight Duration', data=df)
code
130014083/cell_24
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
max_value = np.max(df['Ticket Price'])
max_value
code
130014083/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
filtered_df = df[df['Delay Minutes'] > 60]
filtered_df = df[df['Churned'] == False]  # note: this overwrites the delay filter on the line above
filtered_df.head()
code
130014083/cell_22
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
df['Ticket Price'].sum()
code
130014083/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
df.head()
code
130014083/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
correlation_matrix = df.corr(numeric_only=True)
sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm')
code
130014083/cell_37
[ "text_plain_output_1.png", "image_output_1.png" ]
import missingno as msno
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
correlation_matrix = df.corr(numeric_only=True)
msno.matrix(df)
code
130014083/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
selected_columns = ['Departure City', 'Arrival City', 'Flight Duration', 'Delay Minutes', 'Booking Class']
df_selected = df[selected_columns]
df_selected
code
130014083/cell_36
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/flight-dataset/Flight_data.csv')
sorted_df = df.sort_values('Customer ID', ascending=False)
Avg_ticket_price = df.groupby('Ticket Price').mean(numeric_only=True)
Avg_delay_min = df.groupby('Delay Minutes').mean(numeric_only=True)
df.dropna()
correlation_matrix = df.corr(numeric_only=True)
sns.barplot(x='Ticket Price', y='Competitor Price', data=df)
code
72118922/cell_13
[ "text_html_output_1.png" ]
from keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D, BatchNormalization
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.utils import to_categorical
from tqdm.notebook import tqdm
import cv2
import numpy as np
import os
import pandas as pd
import pydicom

train_name = os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train')
test_name = os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/test')
labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
labels
sample = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/sample_submission.csv')
sample

def load_dicom(path):
    dicom = pydicom.dcmread(path)  # read_file is deprecated/removed in newer pydicom; dcmread is the supported API
    data = dicom.pixel_array
    data = data - np.min(data)  # min-max normalize to 0..255
    if np.max(data) != 0:
        data = data / np.max(data)
    data = (data * 255).astype(np.uint8)
    return data

path0 = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/00000/T1wCE/Image-1.dcm'
img1 = load_dicom(path0)
img2 = cv2.resize(img1, (9, 9))
train_dir = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train'
trainset = []
trainlabel = []
trainidt = []
for i in tqdm(range(len(labels))):
    idt = labels.loc[i, 'BraTS21ID']
    idt2 = ('00000' + str(idt))[-5:]
    path = os.path.join(train_dir, idt2, 'T1wCE')
    for im in os.listdir(path):
        img = load_dicom(os.path.join(path, im))
        img = cv2.resize(img, (9, 9))
        image = img_to_array(img)
        image = image / 255.0
        trainset += [image]
        trainlabel += [labels.loc[i, 'MGMT_value']]
        trainidt += [idt]
test_dir = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test'
testset = []
testidt = []
for i in tqdm(range(len(sample))):
    idt = sample.loc[i, 'BraTS21ID']
    idt2 = ('00000' + str(idt))[-5:]
    path = os.path.join(test_dir, idt2, 'T1wCE')
    for im in os.listdir(path):
        img = load_dicom(os.path.join(path, im))
        img = cv2.resize(img, (9, 9))
        image = img_to_array(img)
        image = image / 255.0
        testset += [image]
        testidt += [idt]
y0 = np.array(trainlabel)
Y_train = to_categorical(y0)
X_train = np.array(trainset)
X_test = np.array(testset)
model = Sequential()
model.add(Conv2D(64, (4, 4), input_shape=(9, 9, 1), activation='relu'))
model.add(Conv2D(32, (2, 2), activation='relu'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
his = model.fit(X_train, Y_train, validation_split=0.2, epochs=100, batch_size=64, verbose=2)
code
72118922/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
labels
sample = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/sample_submission.csv')
sample
code
72118922/cell_6
[ "text_html_output_1.png" ]
import cv2
import numpy as np
import pydicom

def load_dicom(path):
    dicom = pydicom.dcmread(path)  # read_file is deprecated/removed in newer pydicom
    data = dicom.pixel_array
    data = data - np.min(data)
    if np.max(data) != 0:
        data = data / np.max(data)
    data = (data * 255).astype(np.uint8)
    return data

path0 = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/00000/T1wCE/Image-1.dcm'
img1 = load_dicom(path0)
img2 = cv2.resize(img1, (9, 9))
print(img1.shape)
print(img2.shape)
code
72118922/cell_7
[ "text_html_output_1.png" ]
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tqdm.notebook import tqdm
import cv2
import numpy as np
import os
import pandas as pd
import pydicom

train_name = os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train')
test_name = os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/test')
labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
labels

def load_dicom(path):
    dicom = pydicom.dcmread(path)  # read_file is deprecated/removed in newer pydicom
    data = dicom.pixel_array
    data = data - np.min(data)
    if np.max(data) != 0:
        data = data / np.max(data)
    data = (data * 255).astype(np.uint8)
    return data

path0 = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/00000/T1wCE/Image-1.dcm'
img1 = load_dicom(path0)
img2 = cv2.resize(img1, (9, 9))
train_dir = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train'
trainset = []
trainlabel = []
trainidt = []
for i in tqdm(range(len(labels))):
    idt = labels.loc[i, 'BraTS21ID']
    idt2 = ('00000' + str(idt))[-5:]
    path = os.path.join(train_dir, idt2, 'T1wCE')
    for im in os.listdir(path):
        img = load_dicom(os.path.join(path, im))
        img = cv2.resize(img, (9, 9))
        image = img_to_array(img)
        image = image / 255.0
        trainset += [image]
        trainlabel += [labels.loc[i, 'MGMT_value']]
        trainidt += [idt]
code
72118922/cell_18
[ "text_plain_output_1.png" ]
from keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D, BatchNormalization
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.utils import to_categorical
from tqdm.notebook import tqdm
import cv2
import numpy as np
import os
import pandas as pd
import pydicom

train_name = os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train')
test_name = os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/test')
labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
labels
sample = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/sample_submission.csv')
sample

def load_dicom(path):
    dicom = pydicom.dcmread(path)  # read_file is deprecated/removed in newer pydicom
    data = dicom.pixel_array
    data = data - np.min(data)
    if np.max(data) != 0:
        data = data / np.max(data)
    data = (data * 255).astype(np.uint8)
    return data

path0 = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/00000/T1wCE/Image-1.dcm'
img1 = load_dicom(path0)
img2 = cv2.resize(img1, (9, 9))
train_dir = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train'
trainset = []
trainlabel = []
trainidt = []
for i in tqdm(range(len(labels))):
    idt = labels.loc[i, 'BraTS21ID']
    idt2 = ('00000' + str(idt))[-5:]
    path = os.path.join(train_dir, idt2, 'T1wCE')
    for im in os.listdir(path):
        img = load_dicom(os.path.join(path, im))
        img = cv2.resize(img, (9, 9))
        image = img_to_array(img)
        image = image / 255.0
        trainset += [image]
        trainlabel += [labels.loc[i, 'MGMT_value']]
        trainidt += [idt]
test_dir = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test'
testset = []
testidt = []
for i in tqdm(range(len(sample))):
    idt = sample.loc[i, 'BraTS21ID']
    idt2 = ('00000' + str(idt))[-5:]
    path = os.path.join(test_dir, idt2, 'T1wCE')
    for im in os.listdir(path):
        img = load_dicom(os.path.join(path, im))
        img = cv2.resize(img, (9, 9))
        image = img_to_array(img)
        image = image / 255.0
        testset += [image]
        testidt += [idt]
y0 = np.array(trainlabel)
Y_train = to_categorical(y0)
X_train = np.array(trainset)
X_test = np.array(testset)
model = Sequential()
model.add(Conv2D(64, (4, 4), input_shape=(9, 9, 1), activation='relu'))
model.add(Conv2D(32, (2, 2), activation='relu'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
his = model.fit(X_train, Y_train, validation_split=0.2, epochs=100, batch_size=64, verbose=2)
y_pred = model.predict(X_test)
pred = np.argmax(y_pred, axis=1)
result = pd.DataFrame(testidt)
result[1] = pred
result.columns = ['BraTS21ID', 'MGMT_value']
result2 = result.groupby('BraTS21ID', as_index=False).mean()  # average the per-slice predictions for each patient
result2
result2['BraTS21ID'] = sample['BraTS21ID']
result2['MGMT_value'] = result2['MGMT_value'].apply(lambda x: round(x * 10) / 10)
result2.to_csv('submission.csv', index=False)
result2
code
72118922/cell_8
[ "text_html_output_1.png" ]
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tqdm.notebook import tqdm
import cv2
import numpy as np
import os
import pandas as pd
import pydicom

train_name = os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train')
test_name = os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/test')
labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
labels
sample = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/sample_submission.csv')
sample

def load_dicom(path):
    dicom = pydicom.dcmread(path)  # read_file is deprecated/removed in newer pydicom
    data = dicom.pixel_array
    data = data - np.min(data)
    if np.max(data) != 0:
        data = data / np.max(data)
    data = (data * 255).astype(np.uint8)
    return data

path0 = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/00000/T1wCE/Image-1.dcm'
img1 = load_dicom(path0)
img2 = cv2.resize(img1, (9, 9))
train_dir = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train'
trainset = []
trainlabel = []
trainidt = []
for i in tqdm(range(len(labels))):
    idt = labels.loc[i, 'BraTS21ID']
    idt2 = ('00000' + str(idt))[-5:]
    path = os.path.join(train_dir, idt2, 'T1wCE')
    for im in os.listdir(path):
        img = load_dicom(os.path.join(path, im))
        img = cv2.resize(img, (9, 9))
        image = img_to_array(img)
        image = image / 255.0
        trainset += [image]
        trainlabel += [labels.loc[i, 'MGMT_value']]
        trainidt += [idt]
test_dir = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test'
testset = []
testidt = []
for i in tqdm(range(len(sample))):
    idt = sample.loc[i, 'BraTS21ID']
    idt2 = ('00000' + str(idt))[-5:]
    path = os.path.join(test_dir, idt2, 'T1wCE')
    for im in os.listdir(path):
        img = load_dicom(os.path.join(path, im))
        img = cv2.resize(img, (9, 9))
        image = img_to_array(img)
        image = image / 255.0
        testset += [image]
        testidt += [idt]
code
72118922/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
labels
code
72118922/cell_17
[ "text_plain_output_1.png" ]
from keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D, BatchNormalization
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.utils import to_categorical
from tqdm.notebook import tqdm
import cv2
import numpy as np
import os
import pandas as pd
import pydicom

train_name = os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train')
test_name = os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/test')
labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
labels
sample = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/sample_submission.csv')
sample

def load_dicom(path):
    dicom = pydicom.dcmread(path)  # read_file is deprecated/removed in newer pydicom
    data = dicom.pixel_array
    data = data - np.min(data)
    if np.max(data) != 0:
        data = data / np.max(data)
    data = (data * 255).astype(np.uint8)
    return data

path0 = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/00000/T1wCE/Image-1.dcm'
img1 = load_dicom(path0)
img2 = cv2.resize(img1, (9, 9))
train_dir = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train'
trainset = []
trainlabel = []
trainidt = []
for i in tqdm(range(len(labels))):
    idt = labels.loc[i, 'BraTS21ID']
    idt2 = ('00000' + str(idt))[-5:]
    path = os.path.join(train_dir, idt2, 'T1wCE')
    for im in os.listdir(path):
        img = load_dicom(os.path.join(path, im))
        img = cv2.resize(img, (9, 9))
        image = img_to_array(img)
        image = image / 255.0
        trainset += [image]
        trainlabel += [labels.loc[i, 'MGMT_value']]
        trainidt += [idt]
test_dir = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test'
testset = []
testidt = []
for i in tqdm(range(len(sample))):
    idt = sample.loc[i, 'BraTS21ID']
    idt2 = ('00000' + str(idt))[-5:]
    path = os.path.join(test_dir, idt2, 'T1wCE')
    for im in os.listdir(path):
        img = load_dicom(os.path.join(path, im))
        img = cv2.resize(img, (9, 9))
        image = img_to_array(img)
        image = image / 255.0
        testset += [image]
        testidt += [idt]
y0 = np.array(trainlabel)
Y_train = to_categorical(y0)
X_train = np.array(trainset)
X_test = np.array(testset)
model = Sequential()
model.add(Conv2D(64, (4, 4), input_shape=(9, 9, 1), activation='relu'))
model.add(Conv2D(32, (2, 2), activation='relu'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
his = model.fit(X_train, Y_train, validation_split=0.2, epochs=100, batch_size=64, verbose=2)
y_pred = model.predict(X_test)
pred = np.argmax(y_pred, axis=1)
result = pd.DataFrame(testidt)
result[1] = pred
result.columns = ['BraTS21ID', 'MGMT_value']
result2 = result.groupby('BraTS21ID', as_index=False).mean()  # average the per-slice predictions for each patient
result2
code
72118922/cell_14
[ "text_plain_output_1.png" ]
from keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D, BatchNormalization
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.utils import to_categorical
from tqdm.notebook import tqdm
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pydicom

train_name = os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train')
test_name = os.listdir('../input/rsna-miccai-brain-tumor-radiogenomic-classification/test')
labels = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv')
labels
sample = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/sample_submission.csv')
sample

def load_dicom(path):
    dicom = pydicom.dcmread(path)  # read_file is deprecated/removed in newer pydicom
    data = dicom.pixel_array
    data = data - np.min(data)
    if np.max(data) != 0:
        data = data / np.max(data)
    data = (data * 255).astype(np.uint8)
    return data

path0 = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/00000/T1wCE/Image-1.dcm'
img1 = load_dicom(path0)
img2 = cv2.resize(img1, (9, 9))
train_dir = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train'
trainset = []
trainlabel = []
trainidt = []
for i in tqdm(range(len(labels))):
    idt = labels.loc[i, 'BraTS21ID']
    idt2 = ('00000' + str(idt))[-5:]
    path = os.path.join(train_dir, idt2, 'T1wCE')
    for im in os.listdir(path):
        img = load_dicom(os.path.join(path, im))
        img = cv2.resize(img, (9, 9))
        image = img_to_array(img)
        image = image / 255.0
        trainset += [image]
        trainlabel += [labels.loc[i, 'MGMT_value']]
        trainidt += [idt]
test_dir = '../input/rsna-miccai-brain-tumor-radiogenomic-classification/test'
testset = []
testidt = []
for i in tqdm(range(len(sample))):
    idt = sample.loc[i, 'BraTS21ID']
    idt2 = ('00000' + str(idt))[-5:]
    path = os.path.join(test_dir, idt2, 'T1wCE')
    for im in os.listdir(path):
        img = load_dicom(os.path.join(path, im))
        img = cv2.resize(img, (9, 9))
        image = img_to_array(img)
        image = image / 255.0
        testset += [image]
        testidt += [idt]
y0 = np.array(trainlabel)
Y_train = to_categorical(y0)
X_train = np.array(trainset)
X_test = np.array(testset)
model = Sequential()
model.add(Conv2D(64, (4, 4), input_shape=(9, 9, 1), activation='relu'))
model.add(Conv2D(32, (2, 2), activation='relu'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
his = model.fit(X_train, Y_train, validation_split=0.2, epochs=100, batch_size=64, verbose=2)
get_acc = his.history['accuracy']
value_acc = his.history['val_accuracy']
get_loss = his.history['loss']
validation_loss = his.history['val_loss']
epochs = range(len(get_acc))
plt.plot(epochs, get_acc, 'r', label='Accuracy of Training data')
plt.plot(epochs, value_acc, 'b', label='Accuracy of Validation data')
plt.title('Training vs validation accuracy')
plt.legend(loc=0)
plt.figure()
plt.show()
code
72118922/cell_12
[ "text_html_output_1.png" ]
from keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D, BatchNormalization

model = Sequential()
model.add(Conv2D(64, (4, 4), input_shape=(9, 9, 1), activation='relu'))
model.add(Conv2D(32, (2, 2), activation='relu'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
code
90103606/cell_21
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

articles = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
transactions = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')

def missing_values(data):
    total = data.isnull().sum().sort_values(ascending=False)
    percent = (data.isnull().sum() / data.isnull().count() * 100).sort_values(ascending=False)
    return pd.concat([total, percent], axis=1, keys=['Number of Missing Values', 'Percentage'])

customers_missing = missing_values(customers)
customers_missing.loc[customers_missing['Percentage'] > 0]
code
90103606/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

articles = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
customers.head()
code
90103606/cell_34
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px

articles = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
transactions = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')

def missing_values(data):
    total = data.isnull().sum().sort_values(ascending=False)
    percent = (data.isnull().sum() / data.isnull().count() * 100).sort_values(ascending=False)
    return pd.concat([total, percent], axis=1, keys=['Number of Missing Values', 'Percentage'])

fig = px.pie(articles, values='article_id', title='Distribution by Index Group Name', names='index_group_name',
             color_discrete_sequence=px.colors.sequential.RdBu, hover_data=['index_group_name'],
             labels={'index_group_name': 'Index Group Name'}, height=450)
fig.show()
fig = px.histogram(articles, x='garment_group_name', color='index_group_name',
                   title='Index Group Name per Garment Group Name',
                   color_discrete_sequence=px.colors.sequential.Agsunset, height=600)
fig.show()
df1 = articles.groupby(['section_name']).count().reset_index()
fig = px.bar(df1, x=articles.groupby(['section_name']).size(), y='section_name', color='section_name',
             title='Distribution by Section Name', hover_data=['section_name'], text_auto='.2s',
             labels={'section_name': 'Section Name'}, orientation='h',
             color_discrete_sequence=px.colors.diverging.Temps, height=1000)
fig.update_traces(textfont_size=11, textangle=0, textposition='outside', cliponaxis=False)
fig.update_layout(xaxis_title='Count')
fig.show()
df4 = articles.groupby(['index_name'])['article_id'].nunique()
df4 = pd.DataFrame({'IndexName': df4.index, 'Articles': df4.values})
labels = df4['IndexName']
values = df4['Articles']
fig = px.pie(labels, values=values, hole=0.35, names=labels, title='Distribution by Index Name',
             color_discrete_sequence=px.colors.cyclical.mygbm)
fig.show()
df5 = articles.groupby(['perceived_colour_master_name']).count().reset_index()
colors = ['#F5F5DC', '#000000', '#023e8a', '#168aad', '#7f5539', '#90be6d', '#b7b7a4', '#606c38', '#9d4edd',
          '#b7b7a4', '#9e2a2b', '#f77f00', '#ffafcc', '#d00000', '#34a0a4', '#3e1f47', '#ffffff', '#fcbf49',
          '#dddf00', '#9e0059']
fig = px.bar(df5, y=articles.groupby(['perceived_colour_master_name']).size(), x='perceived_colour_master_name',
             color='perceived_colour_master_name', hover_data=['perceived_colour_master_name'], text_auto='.2s',
             color_discrete_sequence=colors, title='Distribution by Perceived Color Master Name',
             labels={'perceived_colour_master_name': 'Perceived Color Master Name'})
fig.update_traces(textfont_size=11, textangle=0, textposition='outside', cliponaxis=False)
fig.update_layout(yaxis_title='Count')
fig.show()
fig = px.histogram(customers, x='age', range_x=[0, 100], title='Age Distribution', height=450)
fig.show()
code
90103606/cell_23
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

articles = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
transactions = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')

def missing_values(data):
    total = data.isnull().sum().sort_values(ascending=False)
    percent = (data.isnull().sum() / data.isnull().count() * 100).sort_values(ascending=False)
    return pd.concat([total, percent], axis=1, keys=['Number of Missing Values', 'Percentage'])

transactions_missing = missing_values(transactions)
transactions_missing.loc[transactions_missing['Percentage'] > 0]
code
90103606/cell_30
[ "text_html_output_2.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px

articles = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
fig = px.pie(articles, values='article_id', title='Distribution by Index Group Name', names='index_group_name',
             color_discrete_sequence=px.colors.sequential.RdBu, hover_data=['index_group_name'],
             labels={'index_group_name': 'Index Group Name'}, height=450)
fig.show()
fig = px.histogram(articles, x='garment_group_name', color='index_group_name',
                   title='Index Group Name per Garment Group Name',
                   color_discrete_sequence=px.colors.sequential.Agsunset, height=600)
fig.show()
df1 = articles.groupby(['section_name']).count().reset_index()
fig = px.bar(df1, x=articles.groupby(['section_name']).size(), y='section_name', color='section_name',
             title='Distribution by Section Name', hover_data=['section_name'], text_auto='.2s',
             labels={'section_name': 'Section Name'}, orientation='h',
             color_discrete_sequence=px.colors.diverging.Temps, height=1000)
fig.update_traces(textfont_size=11, textangle=0, textposition='outside', cliponaxis=False)
fig.update_layout(xaxis_title='Count')
fig.show()
code
90103606/cell_6
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from tabulate import tabulate
import cufflinks as cf
import plotly.express as px
import plotly.graph_objects as go
code
90103606/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

articles = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
articles.head()
code
90103606/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

articles = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
transactions = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')

def missing_values(data):
    total = data.isnull().sum().sort_values(ascending=False)
    percent = (data.isnull().sum() / data.isnull().count() * 100).sort_values(ascending=False)
    return pd.concat([total, percent], axis=1, keys=['Number of Missing Values', 'Percentage'])

articles_missing = missing_values(articles)
articles_missing.loc[articles_missing['Percentage'] > 0]
code
90103606/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90103606/cell_32
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px

articles = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
transactions = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')

def missing_values(data):
    total = data.isnull().sum().sort_values(ascending=False)
    percent = (data.isnull().sum() / data.isnull().count() * 100).sort_values(ascending=False)
    return pd.concat([total, percent], axis=1, keys=['Number of Missing Values', 'Percentage'])

fig = px.pie(articles, values='article_id', title='Distribution by Index Group Name', names='index_group_name',
             color_discrete_sequence=px.colors.sequential.RdBu, hover_data=['index_group_name'],
             labels={'index_group_name': 'Index Group Name'}, height=450)
fig.show()
fig = px.histogram(articles, x='garment_group_name', color='index_group_name',
                   title='Index Group Name per Garment Group Name',
                   color_discrete_sequence=px.colors.sequential.Agsunset, height=600)
fig.show()
df1 = articles.groupby(['section_name']).count().reset_index()
fig = px.bar(df1, x=articles.groupby(['section_name']).size(), y='section_name', color='section_name',
             title='Distribution by Section Name', hover_data=['section_name'], text_auto='.2s',
             labels={'section_name': 'Section Name'}, orientation='h',
             color_discrete_sequence=px.colors.diverging.Temps, height=1000)
fig.update_traces(textfont_size=11, textangle=0, textposition='outside', cliponaxis=False)
fig.update_layout(xaxis_title='Count')
fig.show()
df4 = articles.groupby(['index_name'])['article_id'].nunique()
df4 = pd.DataFrame({'IndexName': df4.index, 'Articles': df4.values})
labels = df4['IndexName']
values = df4['Articles']
fig = px.pie(labels, values=values, hole=0.35, names=labels, title='Distribution by Index Name',
             color_discrete_sequence=px.colors.cyclical.mygbm)
fig.show()
df5 = articles.groupby(['perceived_colour_master_name']).count().reset_index()
colors = ['#F5F5DC', '#000000', '#023e8a', '#168aad', '#7f5539', '#90be6d', '#b7b7a4', '#606c38', '#9d4edd',
          '#b7b7a4', '#9e2a2b', '#f77f00', '#ffafcc', '#d00000', '#34a0a4', '#3e1f47', '#ffffff', '#fcbf49',
          '#dddf00', '#9e0059']
fig = px.bar(df5, y=articles.groupby(['perceived_colour_master_name']).size(), x='perceived_colour_master_name',
             color='perceived_colour_master_name', hover_data=['perceived_colour_master_name'], text_auto='.2s',
             color_discrete_sequence=colors, title='Distribution by Perceived Color Master Name',
             labels={'perceived_colour_master_name': 'Perceived Color Master Name'})
fig.update_traces(textfont_size=11, textangle=0, textposition='outside', cliponaxis=False)
fig.update_layout(yaxis_title='Count')
fig.show()
code
90103606/cell_28
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px

articles = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
fig = px.pie(articles, values='article_id', title='Distribution by Index Group Name', names='index_group_name',
             color_discrete_sequence=px.colors.sequential.RdBu, hover_data=['index_group_name'],
             labels={'index_group_name': 'Index Group Name'}, height=450)
fig.show()
fig = px.histogram(articles, x='garment_group_name', color='index_group_name',
                   title='Index Group Name per Garment Group Name',
                   color_discrete_sequence=px.colors.sequential.Agsunset, height=600)
fig.show()
code
90103606/cell_8
[ "text_html_output_1.png" ]
import os

l = os.listdir('/kaggle/input/h-and-m-personalized-fashion-recommendations/')
print(f'Folders: {l}')
code
90103606/cell_15
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

articles = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
transactions = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
transactions.head()
code
90103606/cell_31
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px

articles = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
transactions = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')

def missing_values(data):
    total = data.isnull().sum().sort_values(ascending=False)
    percent = (data.isnull().sum() / data.isnull().count() * 100).sort_values(ascending=False)
    return pd.concat([total, percent], axis=1, keys=['Number of Missing Values', 'Percentage'])

fig = px.pie(articles, values='article_id', title='Distribution by Index Group Name', names='index_group_name',
             color_discrete_sequence=px.colors.sequential.RdBu, hover_data=['index_group_name'],
             labels={'index_group_name': 'Index Group Name'}, height=450)
fig.show()
fig = px.histogram(articles, x='garment_group_name', color='index_group_name',
                   title='Index Group Name per Garment Group Name',
                   color_discrete_sequence=px.colors.sequential.Agsunset, height=600)
fig.show()
df1 = articles.groupby(['section_name']).count().reset_index()
fig = px.bar(df1, x=articles.groupby(['section_name']).size(), y='section_name', color='section_name',
             title='Distribution by Section Name', hover_data=['section_name'], text_auto='.2s',
             labels={'section_name': 'Section Name'}, orientation='h',
             color_discrete_sequence=px.colors.diverging.Temps, height=1000)
fig.update_traces(textfont_size=11, textangle=0, textposition='outside', cliponaxis=False)
fig.update_layout(xaxis_title='Count')
fig.show()
df4 = articles.groupby(['index_name'])['article_id'].nunique()
df4 = pd.DataFrame({'IndexName': df4.index, 'Articles': df4.values})
labels = df4['IndexName']
values = df4['Articles']
fig = px.pie(labels, values=values, hole=0.35, names=labels, title='Distribution by Index Name',
             color_discrete_sequence=px.colors.cyclical.mygbm)
fig.show()
code
90103606/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

articles = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
transactions = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
transactions.info()
code
90103606/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

articles = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
articles.info()
code
90103606/cell_27
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px

articles = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
fig = px.pie(articles, values='article_id', title='Distribution by Index Group Name', names='index_group_name', color_discrete_sequence=px.colors.sequential.RdBu, hover_data=['index_group_name'], labels={'index_group_name': 'Index Group Name'}, height=450)
fig.show()
code
90103606/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

articles = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
customers.info()
code
128009329/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

# `reads` (per-chunk row counts) and `dtyping` (column dtype map) are defined
# in earlier cells of this notebook.
df0 = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv', nrows=reads[0], dtype=dtyping, low_memory=True)
mem_usage = df0.memory_usage().sum() / 1024 ** 2
print(f'Memory Usage : {mem_usage} MB')
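# Optional follow-up (illustrative, not from the original notebook): per-column
# memory usage shows which dtype choices in `dtyping` pay off the most.
print((df0.memory_usage(deep=True).sort_values(ascending=False) / 1024 ** 2).head(10))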
code
128009329/cell_4
[ "text_plain_output_1.png" ]
import numpy as np  # NumPy (calculations, etc.)
import pandas as pd  # pandas (data loading)

tmp = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv', usecols=[0], low_memory=True)
tmp = tmp.groupby('session_id').session_id.agg('count')
labels = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train_labels.csv')
labels['session'] = labels['session_id'].str.split('_', expand=True)[0].astype(np.uint64)
labels['q'] = labels['session_id'].str.split('_q', expand=True)[1].astype(int)
labels
labels.dtypes
code
128009329/cell_6
[ "text_html_output_1.png" ]
import gc

gc.enable()
gc.collect()
code
128009329/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # pandas (data loading)

tmp = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv', usecols=[0], low_memory=True)
tmp = tmp.groupby('session_id').session_id.agg('count')
display(tmp)
code
128009329/cell_11
[ "text_plain_output_1.png" ]
df0
code
128009329/cell_19
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.model_selection import GroupKFold
import gc
import joblib
import numpy as np  # NumPy (calculations, etc.)
import pandas as pd  # pandas (data loading)

tmp = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv', usecols=[0], low_memory=True)
tmp = tmp.groupby('session_id').session_id.agg('count')
labels = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train_labels.csv')
labels['session'] = labels['session_id'].str.split('_', expand=True)[0].astype(np.uint64)
labels['q'] = labels['session_id'].str.split('_q', expand=True)[1].astype(int)
labels
labels.dtypes
gc.enable()
gc.collect()
ITER = 10
PIECES = int(np.ceil(len(tmp) / ITER))
reads = []
skips = [0]
for k in range(ITER):
    a = k * PIECES
    b = (k + 1) * PIECES
    if b > len(tmp):
        b = len(tmp)
    r = tmp.iloc[a:b].sum()
    reads.append(r)
    skips.append(skips[-1] + r)
dtyping = {'session_id': np.uint64, 'index': np.uint8, 'elapsed_time': np.uint8, 'event_name': 'category', 'name': 'category', 'level': np.uint8, 'page': np.float32, 'room_coor_x': np.float32, 'room_coor_y': np.float32, 'screen_coor_x': np.float32, 'screen_coor_y': np.float32, 'hover_duration': np.float32, 'text': 'category', 'fqid': 'category', 'room_fqid': 'category', 'text_fqid': 'category', 'fullscreen': np.bool8, 'hq': np.bool8, 'music': np.bool8, 'level_group': 'category'}
CATS = ['event_name', 'fqid', 'room_fqid', 'text']
NUMS = ['elapsed_time', 'level', 'page', 'room_coor_x', 'room_coor_y', 'screen_coor_x', 'screen_coor_y', 'hover_duration']
EVENTS = ['navigate_click', 'person_click', 'cutscene_click', 'object_click', 'map_hover', 'notification_click', 'map_click', 'observation_click', 'checkpoint']

def process_level(train):
    dfs = []
    for c in CATS:
        tmp = train.groupby(['session_id', 'level_group'])[c].agg('nunique')
        tmp.name = tmp.name + '_nunique'
        dfs.append(tmp)
    for c in NUMS:
        tmp = train.groupby(['session_id', 'level_group'])[c].agg('mean')
        tmp.name = tmp.name + '_mean'
        dfs.append(tmp)
    for c in NUMS:
        tmp = train.groupby(['session_id', 'level_group'])[c].agg('std')
        tmp.name = tmp.name + '_std'
        dfs.append(tmp)
    for c in EVENTS:
        train[c] = (train.event_name == c).astype('int8')
    for c in EVENTS + ['elapsed_time']:
        tmp = train.groupby(['session_id', 'level_group'])[c].agg('sum')
        tmp.name = tmp.name + '_sum'
        dfs.append(tmp)
    train = train.drop(EVENTS, axis=1)
    df = pd.concat(dfs, axis=1)
    df = df.fillna(-1)
    df = df.reset_index()
    df = df.set_index('session_id')
    return df

# `train_df` is built in the chunked-processing cell (128009329/cell_14); this
# cell assumes it already exists.
FEATURES = [c for c in train_df.columns if c not in ['level_group']]
ALL_USERS = train_df.index.unique()
train_level_df = train_df.pivot_table(columns='level_group', values=[x for x in train_df.columns if x != 'level_group'], index='session_id', aggfunc='sum', fill_value=0)
train_level_df.isna().sum()
gkf = GroupKFold(n_splits=5)
oof = pd.DataFrame(data=np.zeros((len(ALL_USERS), 18)), index=ALL_USERS)
models = {}
for i, (t, v) in enumerate(gkf.split(X=train_level_df, groups=train_level_df.index)):
    print(f'FOLD {i}')
    print('')
    for l in range(1, 19):
        xtrain = train_level_df.iloc[t]
        train_users = xtrain.index.values
        ytrain = labels.loc[labels.q == l].set_index('session').loc[train_users]
        xval = train_level_df.iloc[v]
        val_users = xval.index.values
        yval = labels.loc[labels.q == l].set_index('session').loc[val_users]
        model = LogisticRegression(random_state=0, solver='liblinear', max_iter=1500, n_jobs=-1)
        model.fit(xtrain[FEATURES].astype(np.float32), ytrain['correct'])
        yhat = model.predict_proba(xval)[:, 1]
        score = f1_score(yval['correct'], np.round(yhat).astype(int))
        print(f'FOLD {i} LEVEL {l} : {score}')
        models.update({f'{i}': model})
        joblib.dump(model, f'model-fold{i}-level{l}.pkl')
        oof.loc[val_users, l - 1] = yhat
        del xtrain, train_users, ytrain, xval, val_users, yval, model, yhat, score
        gc.collect()
    print()
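# Hedged follow-up (not in the original cell): one way to score the assembled
# out-of-fold predictions. The 0.5 threshold is an illustrative default, not a
# competition-tuned value.
true = pd.DataFrame(data=np.zeros((len(ALL_USERS), 18)), index=ALL_USERS)
for q in range(1, 19):
    true[q - 1] = labels.loc[labels.q == q].set_index('session').loc[ALL_USERS]['correct']
overall_f1 = f1_score(true.values.reshape(-1), (oof.values.reshape(-1) > 0.5).astype(int))
print(f'Overall OOF F1 @ 0.5: {overall_f1:.4f}')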
code
128009329/cell_7
[ "text_plain_output_1.png" ]
import numpy as np  # NumPy (calculations, etc.)
import pandas as pd  # pandas (data loading)

tmp = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv', usecols=[0], low_memory=True)
tmp = tmp.groupby('session_id').session_id.agg('count')
labels = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train_labels.csv')
labels['session'] = labels['session_id'].str.split('_', expand=True)[0].astype(np.uint64)
labels['q'] = labels['session_id'].str.split('_q', expand=True)[1].astype(int)
labels
ITER = 10
PIECES = int(np.ceil(len(tmp) / ITER))
reads = []
skips = [0]
for k in range(ITER):
    a = k * PIECES
    b = (k + 1) * PIECES
    if b > len(tmp):
        b = len(tmp)
    r = tmp.iloc[a:b].sum()
    reads.append(r)
    skips.append(skips[-1] + r)
print(f'To avoid memory error, we will read train in {PIECES} pieces of sizes:')
print(reads)
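# Added sanity check (illustrative): the chunk sizes must tile the file exactly,
# so every session's rows land in exactly one read.
assert sum(reads) == tmp.sum() and skips[-1] == tmp.sum()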
code
128009329/cell_18
[ "text_plain_output_1.png" ]
# `train_df` is built in the chunked-processing cell (128009329/cell_14); this
# cell assumes it already exists.
FEATURES = [c for c in train_df.columns if c not in ['level_group']]
ALL_USERS = train_df.index.unique()
train_level_df = train_df.pivot_table(columns='level_group', values=[x for x in train_df.columns if x != 'level_group'], index='session_id', aggfunc='sum', fill_value=0)
train_level_df.isna().sum()
code
128009329/cell_15
[ "text_plain_output_1.png" ]
FEATURES = [c for c in train_df.columns if c not in ['level_group']]
print('We will train with', len(FEATURES), 'features')
ALL_USERS = train_df.index.unique()
print('We will train with', len(ALL_USERS), 'users info')
code
128009329/cell_16
[ "text_plain_output_1.png" ]
FEATURES = [c for c in train_df.columns if c not in ['level_group']]
ALL_USERS = train_df.index.unique()
train_df
code
128009329/cell_3
[ "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import numpy as np  # NumPy (calculations, etc.)
import pandas as pd  # pandas (data loading)

tmp = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv', usecols=[0], low_memory=True)
tmp = tmp.groupby('session_id').session_id.agg('count')
labels = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train_labels.csv')
labels['session'] = labels['session_id'].str.split('_', expand=True)[0].astype(np.uint64)
labels['q'] = labels['session_id'].str.split('_q', expand=True)[1].astype(int)
labels
code
128009329/cell_17
[ "text_plain_output_1.png" ]
FEATURES = [c for c in train_df.columns if c not in ['level_group']]
ALL_USERS = train_df.index.unique()
train_level_df = train_df.pivot_table(columns='level_group', values=[x for x in train_df.columns if x != 'level_group'], index='session_id', aggfunc='sum', fill_value=0)
display(train_level_df)
code
128009329/cell_14
[ "text_plain_output_1.png" ]
# Relies on `ITER`, `reads`, `skips`, `dtyping`, `process_level`, `gc` and `pd`
# defined in the cells above.
all_pieces = []
for k in range(ITER):
    print(k, ',', end=' ')
    SKIPS = 0
    if k > 0:
        SKIPS = range(1, skips[k] + 1)
    train = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv', nrows=reads[k], skiprows=SKIPS, dtype=dtyping, low_memory=True)
    df = process_level(train)
    all_pieces.append(df)
    del train
    del df
    gc.collect()
print('\n')
train_df = pd.concat(all_pieces, axis=0)
print(f'Shape of Train DF : {train_df.shape}')
display(train_df.head())
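# How the chunking lines up (illustrative note): skiprows=range(1, skips[k] + 1)
# skips the rows already consumed by earlier pieces while keeping header row 0,
# and nrows=reads[k] stops at a session boundary, e.g. for the second piece:
#   pd.read_csv(path, nrows=reads[1], skiprows=range(1, skips[1] + 1), ...)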
code
128009329/cell_12
[ "text_html_output_1.png" ]
df0.groupby(['session_id', 'level'])['elapsed_time'].sum()
code
128009329/cell_5
[ "text_html_output_1.png" ]
import numpy as np  # NumPy (calculations, etc.)
import pandas as pd  # pandas (data loading)

tmp = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv', usecols=[0], low_memory=True)
tmp = tmp.groupby('session_id').session_id.agg('count')
labels = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train_labels.csv')
labels['session'] = labels['session_id'].str.split('_', expand=True)[0].astype(np.uint64)
labels['q'] = labels['session_id'].str.split('_q', expand=True)[1].astype(int)
labels
labels.dtypes
labels['q'].value_counts()
code
1005554/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/epi_r.csv')
sorted(list(df.columns))
df.describe()
code
1005554/cell_6
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('../input/epi_r.csv')
sorted(list(df.columns))
col_names = df.columns
bool_cols = []
nonbool_cols = []
for c in col_names:
    if len(df[c].unique()) == 2:
        bool_cols.append(c)
        df[c] = df[c].astype('bool')
    else:
        nonbool_cols.append(c)
df.pivot_table(index=['bacon'], values=['calories', 'rating'], aggfunc=np.mean)
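# The same aggregation works for any boolean tag column; an illustrative
# variant (assuming a 'vegetarian' tag exists in this dataset):
df.pivot_table(index=['vegetarian'], values=['calories', 'rating'], aggfunc=np.mean)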
code
1005554/cell_2
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/epi_r.csv')
df.head()
code
1005554/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pylab as pylab
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/epi_r.csv')
sorted(list(df.columns))
col_names = df.columns
bool_cols = []
nonbool_cols = []
for c in col_names:
    if len(df[c].unique()) == 2:
        bool_cols.append(c)
        df[c] = df[c].astype('bool')
    else:
        nonbool_cols.append(c)
df.pivot_table(index=['bacon'], values=['calories', 'rating'], aggfunc=np.mean)
pylab.rcParams['figure.figsize'] = (12, 12)
corrmat = df[bool_cols[0:20]].corr()
sns.heatmap(corrmat, vmax=0.8, square=True)
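# Hedged extension (not in the original cell): rank the strongest tag
# co-occurrences from the correlation matrix computed above.
upper = corrmat.where(np.triu(np.ones(corrmat.shape, dtype=bool), k=1))
print(upper.stack().sort_values(ascending=False).head(10))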
code
1005554/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/epi_r.csv')
sorted(list(df.columns))
code
1005554/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/epi_r.csv')
sorted(list(df.columns))
col_names = df.columns
bool_cols = []
nonbool_cols = []
for c in col_names:
    if len(df[c].unique()) == 2:
        bool_cols.append(c)
        df[c] = df[c].astype('bool')
    else:
        nonbool_cols.append(c)
print('{} non-boolean columns'.format(len(col_names) - len(bool_cols)))
print(sorted(nonbool_cols))
code
49120031/cell_2
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/cassava-leaf-disease-classification/train.csv')
[os.mkdir(os.path.join('/kaggle/working', 'label_' + str(x))) for x in df.label.unique()]
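# A re-run-safe variant (illustrative): os.makedirs with exist_ok=True does not
# raise if the label folders already exist, unlike os.mkdir above.
for x in df.label.unique():
    os.makedirs(os.path.join('/kaggle/working', f'label_{x}'), exist_ok=True)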
code
49120031/cell_7
[ "text_plain_output_1.png" ]
import os
import tensorflow as tf
from keras_preprocessing.image import ImageDataGenerator
from tensorflow import keras
from tensorflow.keras import layers

train_data_dir = '/kaggle/working/'
img_height = 300
img_width = 300
batch_size = 64
num_classes = 5
train_datagen = ImageDataGenerator(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, validation_split=0.1)
train_generator = train_datagen.flow_from_directory(train_data_dir, target_size=(img_height, img_width), batch_size=batch_size, class_mode='categorical', subset='training')
validation_generator = train_datagen.flow_from_directory(train_data_dir, target_size=(img_height, img_width), batch_size=batch_size, class_mode='categorical', subset='validation')
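# Quick sanity check (added, illustrative): pull one batch to confirm the
# generator emits (batch, height, width, channels) images and one-hot labels.
x_batch, y_batch = next(train_generator)
print(x_batch.shape, y_batch.shape)  # expected: (64, 300, 300, 3) (64, 5)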
code
49120031/cell_3
[ "text_plain_output_1.png" ]
from shutil import copyfile
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/cassava-leaf-disease-classification/train.csv')
[os.mkdir(os.path.join('/kaggle/working', 'label_' + str(x))) for x in df.label.unique()]
for a, b in df.iterrows():
    src = os.path.join('/kaggle/input/cassava-leaf-disease-classification/train_images', b.image_id)
    dst = os.path.join('/kaggle/working', 'label_' + str(b.label), b.image_id)
    print(src)
    print(dst)
    copyfile(src, dst)
code
88100444/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import random
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import seaborn as sn

big_dance = pd.read_csv('../input/mm-data-prediction/MM_score_predictionv2.csv')
corrMatrix = big_dance.corr()
y = big_dance['Total_Score_March_Madness']
features = ['FG', 'FGA', '3Pper', 'FT', 'FTA', 'PF', 'PTS']
X = big_dance[features]
X.describe()
code
88100444/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import random
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import seaborn as sn
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor

big_dance = pd.read_csv('../input/mm-data-prediction/MM_score_predictionv2.csv')
corrMatrix = big_dance.corr()
y = big_dance['Total_Score_March_Madness']
features = ['FG', 'FGA', '3Pper', 'FT', 'FTA', 'PF', 'PTS']
X = big_dance[features]
bd_model = DecisionTreeRegressor(random_state=1)
bd_model.fit(X, y)
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
bd_model.fit(train_X, train_y)
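# Hedged evaluation sketch (not in the original cell): score the refit model on
# the held-out split rather than the data it was trained on.
from sklearn.metrics import mean_absolute_error
val_predictions = bd_model.predict(val_X)
print(f'Validation MAE: {mean_absolute_error(val_y, val_predictions):.2f}')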
code