path (stringlengths): 13 to 17
screenshot_names (sequencelengths): 1 to 873
code (stringlengths): 0 to 40.4k
cell_type (stringclasses): 1 value
122252043/cell_8
[ "text_plain_output_1.png" ]
41215
code
122252043/cell_80
[ "text_plain_output_1.png" ]
a = 12670
b = 12.344
print(f'a={a:>07,d},b={b:>+6.0f}')
code
122252043/cell_15
[ "text_plain_output_1.png" ]
73
code
122252043/cell_16
[ "text_plain_output_1.png" ]
217366
code
122252043/cell_38
[ "text_plain_output_1.png" ]
x = 5
x += 2
x = 12
y = 8
x ^ y
code
122252043/cell_47
[ "text_plain_output_1.png" ]
True + (not 'piggy')
code
122252043/cell_66
[ "text_plain_output_1.png" ]
int('0b1001001', base=2)
code
122252043/cell_35
[ "text_plain_output_1.png" ]
x = 5
x += 2
x = 12
y = 8
x & y
code
122252043/cell_77
[ "text_plain_output_1.png" ]
a = 12670
b = 12.344
print(f'a={a:>7d},b={b:>6.2f}')
code
122252043/cell_43
[ "text_plain_output_1.png" ]
x = 12
y = 8
y << 2
code
122252043/cell_24
[ "text_plain_output_1.png" ]
128 % 39
code
122252043/cell_14
[ "text_plain_output_1.png" ]
202
code
122252043/cell_22
[ "text_plain_output_1.png" ]
2 ** 120
code
122252043/cell_53
[ "text_plain_output_1.png" ]
eval('66+18')
code
122252043/cell_10
[ "text_plain_output_1.png" ]
11325731
code
122252043/cell_37
[ "text_plain_output_1.png" ]
x = 5
x += 2
x = 12
y = 8
~x
code
122252043/cell_12
[ "text_plain_output_1.png" ]
492
code
122252043/cell_71
[ "text_plain_output_1.png" ]
print("'Holliday'")
code
122252043/cell_5
[ "text_plain_output_1.png" ]
type(6000.0)
code
122252043/cell_36
[ "text_plain_output_1.png" ]
x = 5
x += 2
x = 12
y = 8
x | y
code
327702/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn import svm
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/pitching.csv')
df
df_sg = df[df.gs == df.g]
Y = df_sg.w / df_sg.gs
Y_class = np.floor(Y)
clf = svm.SVC()
clf.fit(X, Y_class)  # X is never defined in this cell, so fit() raises a NameError (matching the stderr output)
code
327702/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd

print(check_output(['ls', '../input']).decode('utf8'))
code
327702/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
X
code
327702/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/pitching.csv')
df
code
32071198/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from functools import partial from joblib import Parallel, delayed from sklearn.compose import ColumnTransformer from sklearn.ensemble import VotingRegressor from sklearn.impute import SimpleImputer from sklearn.metrics import cohen_kappa_score from sklearn.metrics import confusion_matrix, cohen_kappa_score from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import StandardScaler, OneHotEncoder, FunctionTransformer from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.wrappers.scikit_learn import KerasRegressor import multiprocessing import numpy as np import pandas as pd import scipy as sp import tensorflow as tf import xgboost as xgb def applyParallel(dfGrouped, func): retLst = Parallel(n_jobs=multiprocessing.cpu_count())(delayed(func)(name, group) for name, group in dfGrouped) return pd.DataFrame(retLst) last_event_before_assessment = {'Cauldron Filler (Assessment)': '90d848e0', 'Cart Balancer (Assessment)': '7ad3efc6', 'Mushroom Sorter (Assessment)': '3bfd1a65', 'Bird Measurer (Assessment)': 'f56e0afc', 'Chest Sorter (Assessment)': '5b49460a'} media_seq = pd.read_csv('../input/dsb-feats-v2/media_sequence.csv') clips_seq = media_seq[media_seq.type=='Clip'] clip_times = dict(zip(clips_seq.title, clips_seq.duration)) def read_data(): print('Reading train.csv file....') train = pd.read_csv('train.csv') print('Training.csv file have {} rows and {} columns'.format(train.shape[0], train.shape[1])) print('Reading test.csv file....') test = pd.read_csv('test.csv') print('Test.csv file have {} rows and {} columns'.format(test.shape[0], test.shape[1])) print('Reading train_labels.csv file....') train_labels = pd.read_csv('train_labels.csv') print('Train_labels.csv file have {} rows and {} columns'.format(train_labels.shape[0], train_labels.shape[1])) print('Reading specs.csv file....') specs = pd.read_csv('specs.csv') print('Specs.csv file have {} rows and {} columns'.format(specs.shape[0], specs.shape[1])) return train, test, train_labels, specs def get_worst_score(group): return group.sort_values('accuracy_group').iloc[0] def is_assessment(titles_series): def is_assessment_title(title): return "assessment" in title.lower() return titles_series.apply(lambda x: is_assessment_title(x)) def num_unique_days(timestamps): return pd.to_datetime(timestamps).apply(lambda x: x.date()).unique().size def days_since_first_event(timestamps): dates = pd.to_datetime(timestamps).apply(lambda x: x.date()) return (dates.max() - dates.min()).days def get_events_before_game_session(events, game_session, assessment_title): if not (game_session or assessment_title): return events else: assessment_event = last_event_before_assessment.get(assessment_title) game_session_index = events.index[(events.game_session == game_session) & \ (events.event_id.isin([assessment_event] if assessment_event else last_event_before_assessment.values()))] return events.loc[:game_session_index[-1]] def get_clip_duration_features(events): clips = events[events.type=='Clip'] if clips.empty: game_time = 0 skip_rate = 0 avg_watch_length = 0 else: game_time = clips.apply(lambda x: min(x.ts_diff, clip_times.get(x.title)), axis=1).sum() skip_rate = clips.apply(lambda x: x.ts_diff < clip_times.get(x.title), axis=1).mean() avg_watch_length = clips.apply(lambda x: min(x.ts_diff / clip_times.get(x.title), 1), axis=1).mean() return pd.Series([game_time, skip_rate, avg_watch_length], index=['clip_game_time', 'clip_skip_rate', 'clip_avg_watch_length'], 
dtype=float) def group_by_game_session_and_sum(events, columns): """ some columns are rolling counts by game session, take the max value of each game session then sum for totals """ series = pd.Series(dtype=int) for c in columns: # set beginning values for each type to 0 for stype in ['activity', 'game', 'assessment', 'clip']: series[stype+'_'+c] = 0 series['total_'+c] = 0 # get session type and total values and add to running total for session_id, session in events.groupby('game_session'): session_type = session['type'].iloc[0].lower() session_value = session[c].max() / 1000.0 if c=='game_time' else session[c].max() series[session_type+'_'+c] += session_value series['total_'+c] += session_value if c=='game_time': series = series.drop(labels='clip_'+c) series = series.append(get_clip_duration_features(events)) return series def summarize_events(events): """ takes a dataframe of events and returns a pd.Series with aggregate/summary values """ events = events.sort_values('ts').reset_index() events['ts_diff'] = -events.ts.diff(-1).dt.total_seconds() numeric_rows = ['event_count', 'game_time'] aggregates = group_by_game_session_and_sum(events, numeric_rows) aggregates['num_unique_days'] = num_unique_days(events['timestamp']) aggregates['elapsed_days'] = days_since_first_event(events['timestamp']) aggregates['last_world'] = events.tail(1)['world'].values[0] aggregates['last_assessment'] = events[is_assessment(events['title'])].tail(1)['title'].values[0] aggregates['assessments_taken'] = events['title'][events.event_id.isin(last_event_before_assessment.values())].value_counts() aggregates['type_counts'] = events[['game_session', 'type']].drop_duplicates()['type'].value_counts() aggregates['title_counts'] = events[['game_session', 'title']].drop_duplicates()['title'].value_counts() aggregates['event_code_counts'] = events.event_code.value_counts() aggregates['event_id_counts'] = events.event_id.value_counts() aggregates['unique_game_sessions'] = events.game_session.unique().size return aggregates def summarize_events_before_game_session(name, events): if not isinstance(name, (list,tuple)) or len(name)==1: # for test data game_session=None assessment=None name_series = pd.Series([name], index=['installation_id']) else: installation_id, game_session, assessment = name name_series = pd.Series(name, index=['installation_id', 'game_session_y', 'title_y']) events = events.rename(columns={'game_session_x': 'game_session', 'title_x': 'title'}, errors='ignore') events_before = get_events_before_game_session(events, game_session, assessment) aggregates = summarize_events(events_before) try: labels = events[['num_correct', 'num_incorrect', 'accuracy', 'accuracy_group']].iloc[0] \ .append(name_series) row = aggregates.append(labels) except KeyError: row = aggregates.append(name_series) # print("no label columns, just returning features") return row def expand_count_features(features): print('**expanding event type count features**') expanded_type_counts = features.type_counts.apply(pd.Series).fillna(0) # rename the type count columns expanded_type_counts.columns = [c.lower()+'_ct' for c in expanded_type_counts.columns] print('**expanding title count features**') expanded_title_counts = features.title_counts.apply(pd.Series).fillna(0) # rename the type count columns expanded_title_counts.columns = [c.lower().replace(' ', '_')+'_ct' for c in expanded_title_counts.columns] print('**expanding event code count features**') expanded_event_code_counts = features.event_code_counts.apply(pd.Series).fillna(0) # rename 
the event_code count columns expanded_event_code_counts.columns = ['event_{}_ct'.format(int(c)) for c in expanded_event_code_counts.columns] # non_zero_event_code_counts for ec in expanded_event_code_counts.columns: expanded_event_code_counts['non_zero_'+ec] = (expanded_event_code_counts[ec] > 0).astype(int) print('**expanding event id count features**') expanded_event_id_counts = features.event_id_counts.apply(pd.Series).fillna(0) # rename the event_id count columns expanded_event_id_counts.columns = ['eid_{}_ct'.format(c) for c in expanded_event_id_counts.columns] expanded_assessments_taken = features.assessments_taken.apply(pd.Series).fillna(0) feats = pd.concat([features.drop(['type_counts', 'title_counts', 'event_code_counts', 'event_id_counts', 'assessments_taken'], axis=1), expanded_type_counts, expanded_title_counts, expanded_event_code_counts, expanded_event_id_counts, expanded_assessments_taken], axis=1) return feats def split_features_and_labels(df): labels_df = df[['title_y', 'num_correct', 'num_incorrect', 'accuracy', 'accuracy_group', 'installation_id', 'game_session_y']].copy() feats_df = df.drop( ['title_y', 'num_correct', 'num_incorrect', 'game_session_y', 'accuracy', 'accuracy_group'], axis=1) return feats_df, labels_df def basic_user_features_transform(train_data, train_labels=None): data = train_data[['event_id', 'game_session', 'timestamp', 'installation_id', 'event_count', 'event_code', 'game_time', 'title', 'type', 'world']] data['ts'] = pd.to_datetime(data.timestamp) if train_labels is not None: train_w_labels = data.merge(train_labels, on='installation_id') groups = train_w_labels.groupby(['installation_id', 'game_session_y', 'title_y']) else: groups = data.groupby(['installation_id']) # game session y is index 1 of the group name # passing none to game session is for eval data, does not subset any of the data for each installation_id print('**getting user features before each training assessment**') features = applyParallel(groups, lambda name, group: summarize_events_before_game_session(name, group)) expanded_features = expand_count_features(features) if train_labels is not None: return split_features_and_labels(expanded_features) else: return expanded_features, None def get_data_processing_pipe(feats, log_features, categorical_features): # We create the preprocessing pipelines for both numeric and categorical data. 
numeric_features = [c for c in feats.columns if c not in log_features+categorical_features+['installation_id']] numeric_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(fill_value=0, strategy='constant')), ('scaler', StandardScaler())]) numeric_log_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(fill_value=0, strategy='constant')), ('log_scale', FunctionTransformer(np.log1p)), ('scaler', StandardScaler())]) categorical_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(strategy='constant', fill_value='missing')), ('onehot', OneHotEncoder(handle_unknown='ignore'))]) preprocessor = ColumnTransformer( remainder='drop', transformers=[ ('num', numeric_transformer, numeric_features), ('num_log', numeric_log_transformer, log_features), ('cat', categorical_transformer, categorical_features)]) return preprocessor class OrdinalRegressor: def __init__(self, clf, **kwargs): self.clf = clf(**kwargs) self.threshold_optimizer = OptimizedRounder([0, 1, 2, 3]) def fit(self, X, y, **fit_params): self.clf.fit(X, y, **fit_params) self.threshold_optimizer.fit(self.predict(X), y) def predict(self, X, **predict_params): pred = self.clf.predict(X) if predict_params.get('classify'): return self.classify(pred) return pred def set_params(self, **kwargs): self.clf = self.clf.set_params(**kwargs) def classify(self, pred): return self.threshold_optimizer.predict(pred) def predict_and_classify(self, X): return self.classify(self.predict(X)) def predict_proba(self, X): return self.predict_and_classify(X) class OptimizedRounder(object): def __init__(self, labels): self.coef_ = 0 self.labels = labels def _kappa_loss(self, coef, X, y): if len(set(coef)) != len(coef): return 0 preds = pd.cut(X, [-np.inf] + list(np.sort(coef)) + [np.inf], labels=self.labels) return -cohen_kappa_score(y, preds, weights='quadratic') def fit(self, X, y): loss_partial = partial(self._kappa_loss, X=X, y=y) initial_coef = [0.5, 1.5, 2.5] constraints = ({'type': 'ineq', 'fun': lambda x: x[1] - x[0] - 0.001}, {'type': 'ineq', 'fun': lambda x: x[2] - x[1] - 0.001}) self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='COBYLA', constraints=constraints) def predict(self, X, coef=None): coef = coef if coef else self.coefficients() preds = pd.cut(X, [-np.inf] + list(np.sort(coef)) + [np.inf], labels=self.labels) return preds def coefficients(self): return self.coef_['x'] feats = installation_features = pd.read_csv('../input/dsb-feats-v2/installation_features_v2.csv') labels = installation_labels = pd.read_csv('../input/dsb-feats-v2/installation_labels_v2.csv') test = pd.read_csv('../input/data-science-bowl-2019/test.csv') from sklearn.preprocessing import OneHotEncoder from sklearn.linear_model import LogisticRegression from sklearn.model_selection import GridSearchCV, train_test_split from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import confusion_matrix, cohen_kappa_score import xgboost as xgb from sklearn.utils import class_weight from sklearn.ensemble import VotingRegressor import tensorflow as tf from tensorflow.keras.wrappers.scikit_learn import KerasRegressor from tensorflow import keras from tensorflow.keras import layers def build_model(input_shape=[538], hidden_units=[64], learning_rate=0.003, dropout=0, l1=0, l2=0, epochs=20): model = keras.Sequential([layers.Input(input_shape)]) for hu in hidden_units: model.add(layers.Dense(hu, activation=tf.keras.layers.LeakyReLU(), kernel_regularizer=keras.regularizers.l1_l2(l1=l1, l2=l2))) 
model.add(layers.Dropout(dropout)) model.add(layers.Dense(1)) optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate) model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse']) return model class KerasRegressor_v2(KerasRegressor): def __init__(self, build_fn, **kwargs): super().__init__(build_fn, **kwargs) self._estimator_type = 'regressor' feature_pipe = get_data_processing_pipe(feats, log_features=list(filter(lambda c: c.startswith('event') or c.endswith('event_count') or c.endswith('game_time') or c.startswith('eid_'), feats.columns)), categorical_features=['last_world', 'last_assessment']) xgb_params = {'colsample_bytree': 0.3, 'learning_rate': 0.03, 'max_depth': 7, 'n_estimators': 300, 'reg_alpha': 10, 'subsample': 0.8} mlp_params = {'dropout': 0.1, 'epochs': 20, 'hidden_units': (128, 128), 'l1': 0.001, 'l2': 0.0, 'learning_rate': 0.0001} ordinal_pipe = Pipeline(steps=[('preprocess', feature_pipe), ('clf', OrdinalRegressor(VotingRegressor, estimators=[('xgb', xgb.XGBRegressor(**xgb_params)), ('mlp', KerasRegressor_v2(build_model, **mlp_params))], weights=(0.7, 0.3)))]) test_feats, _ = basic_user_features_transform(test) for c in feats.columns: if c not in test_feats.columns: test_feats[c] = 0 installation_ids = test_feats.installation_id test_X = test_feats
code
32071198/cell_6
[ "text_plain_output_1.png" ]
from functools import partial from joblib import Parallel, delayed from sklearn.compose import ColumnTransformer from sklearn.impute import SimpleImputer from sklearn.metrics import cohen_kappa_score from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler, OneHotEncoder, FunctionTransformer import multiprocessing import numpy as np import pandas as pd import scipy as sp def applyParallel(dfGrouped, func): retLst = Parallel(n_jobs=multiprocessing.cpu_count())(delayed(func)(name, group) for name, group in dfGrouped) return pd.DataFrame(retLst) last_event_before_assessment = {'Cauldron Filler (Assessment)': '90d848e0', 'Cart Balancer (Assessment)': '7ad3efc6', 'Mushroom Sorter (Assessment)': '3bfd1a65', 'Bird Measurer (Assessment)': 'f56e0afc', 'Chest Sorter (Assessment)': '5b49460a'} media_seq = pd.read_csv('../input/dsb-feats-v2/media_sequence.csv') clips_seq = media_seq[media_seq.type=='Clip'] clip_times = dict(zip(clips_seq.title, clips_seq.duration)) def read_data(): print('Reading train.csv file....') train = pd.read_csv('train.csv') print('Training.csv file have {} rows and {} columns'.format(train.shape[0], train.shape[1])) print('Reading test.csv file....') test = pd.read_csv('test.csv') print('Test.csv file have {} rows and {} columns'.format(test.shape[0], test.shape[1])) print('Reading train_labels.csv file....') train_labels = pd.read_csv('train_labels.csv') print('Train_labels.csv file have {} rows and {} columns'.format(train_labels.shape[0], train_labels.shape[1])) print('Reading specs.csv file....') specs = pd.read_csv('specs.csv') print('Specs.csv file have {} rows and {} columns'.format(specs.shape[0], specs.shape[1])) return train, test, train_labels, specs def get_worst_score(group): return group.sort_values('accuracy_group').iloc[0] def is_assessment(titles_series): def is_assessment_title(title): return "assessment" in title.lower() return titles_series.apply(lambda x: is_assessment_title(x)) def num_unique_days(timestamps): return pd.to_datetime(timestamps).apply(lambda x: x.date()).unique().size def days_since_first_event(timestamps): dates = pd.to_datetime(timestamps).apply(lambda x: x.date()) return (dates.max() - dates.min()).days def get_events_before_game_session(events, game_session, assessment_title): if not (game_session or assessment_title): return events else: assessment_event = last_event_before_assessment.get(assessment_title) game_session_index = events.index[(events.game_session == game_session) & \ (events.event_id.isin([assessment_event] if assessment_event else last_event_before_assessment.values()))] return events.loc[:game_session_index[-1]] def get_clip_duration_features(events): clips = events[events.type=='Clip'] if clips.empty: game_time = 0 skip_rate = 0 avg_watch_length = 0 else: game_time = clips.apply(lambda x: min(x.ts_diff, clip_times.get(x.title)), axis=1).sum() skip_rate = clips.apply(lambda x: x.ts_diff < clip_times.get(x.title), axis=1).mean() avg_watch_length = clips.apply(lambda x: min(x.ts_diff / clip_times.get(x.title), 1), axis=1).mean() return pd.Series([game_time, skip_rate, avg_watch_length], index=['clip_game_time', 'clip_skip_rate', 'clip_avg_watch_length'], dtype=float) def group_by_game_session_and_sum(events, columns): """ some columns are rolling counts by game session, take the max value of each game session then sum for totals """ series = pd.Series(dtype=int) for c in columns: # set beginning values for each type to 0 for stype in ['activity', 'game', 'assessment', 'clip']: 
series[stype+'_'+c] = 0 series['total_'+c] = 0 # get session type and total values and add to running total for session_id, session in events.groupby('game_session'): session_type = session['type'].iloc[0].lower() session_value = session[c].max() / 1000.0 if c=='game_time' else session[c].max() series[session_type+'_'+c] += session_value series['total_'+c] += session_value if c=='game_time': series = series.drop(labels='clip_'+c) series = series.append(get_clip_duration_features(events)) return series def summarize_events(events): """ takes a dataframe of events and returns a pd.Series with aggregate/summary values """ events = events.sort_values('ts').reset_index() events['ts_diff'] = -events.ts.diff(-1).dt.total_seconds() numeric_rows = ['event_count', 'game_time'] aggregates = group_by_game_session_and_sum(events, numeric_rows) aggregates['num_unique_days'] = num_unique_days(events['timestamp']) aggregates['elapsed_days'] = days_since_first_event(events['timestamp']) aggregates['last_world'] = events.tail(1)['world'].values[0] aggregates['last_assessment'] = events[is_assessment(events['title'])].tail(1)['title'].values[0] aggregates['assessments_taken'] = events['title'][events.event_id.isin(last_event_before_assessment.values())].value_counts() aggregates['type_counts'] = events[['game_session', 'type']].drop_duplicates()['type'].value_counts() aggregates['title_counts'] = events[['game_session', 'title']].drop_duplicates()['title'].value_counts() aggregates['event_code_counts'] = events.event_code.value_counts() aggregates['event_id_counts'] = events.event_id.value_counts() aggregates['unique_game_sessions'] = events.game_session.unique().size return aggregates def summarize_events_before_game_session(name, events): if not isinstance(name, (list,tuple)) or len(name)==1: # for test data game_session=None assessment=None name_series = pd.Series([name], index=['installation_id']) else: installation_id, game_session, assessment = name name_series = pd.Series(name, index=['installation_id', 'game_session_y', 'title_y']) events = events.rename(columns={'game_session_x': 'game_session', 'title_x': 'title'}, errors='ignore') events_before = get_events_before_game_session(events, game_session, assessment) aggregates = summarize_events(events_before) try: labels = events[['num_correct', 'num_incorrect', 'accuracy', 'accuracy_group']].iloc[0] \ .append(name_series) row = aggregates.append(labels) except KeyError: row = aggregates.append(name_series) # print("no label columns, just returning features") return row def expand_count_features(features): print('**expanding event type count features**') expanded_type_counts = features.type_counts.apply(pd.Series).fillna(0) # rename the type count columns expanded_type_counts.columns = [c.lower()+'_ct' for c in expanded_type_counts.columns] print('**expanding title count features**') expanded_title_counts = features.title_counts.apply(pd.Series).fillna(0) # rename the type count columns expanded_title_counts.columns = [c.lower().replace(' ', '_')+'_ct' for c in expanded_title_counts.columns] print('**expanding event code count features**') expanded_event_code_counts = features.event_code_counts.apply(pd.Series).fillna(0) # rename the event_code count columns expanded_event_code_counts.columns = ['event_{}_ct'.format(int(c)) for c in expanded_event_code_counts.columns] # non_zero_event_code_counts for ec in expanded_event_code_counts.columns: expanded_event_code_counts['non_zero_'+ec] = (expanded_event_code_counts[ec] > 0).astype(int) print('**expanding 
event id count features**') expanded_event_id_counts = features.event_id_counts.apply(pd.Series).fillna(0) # rename the event_id count columns expanded_event_id_counts.columns = ['eid_{}_ct'.format(c) for c in expanded_event_id_counts.columns] expanded_assessments_taken = features.assessments_taken.apply(pd.Series).fillna(0) feats = pd.concat([features.drop(['type_counts', 'title_counts', 'event_code_counts', 'event_id_counts', 'assessments_taken'], axis=1), expanded_type_counts, expanded_title_counts, expanded_event_code_counts, expanded_event_id_counts, expanded_assessments_taken], axis=1) return feats def split_features_and_labels(df): labels_df = df[['title_y', 'num_correct', 'num_incorrect', 'accuracy', 'accuracy_group', 'installation_id', 'game_session_y']].copy() feats_df = df.drop( ['title_y', 'num_correct', 'num_incorrect', 'game_session_y', 'accuracy', 'accuracy_group'], axis=1) return feats_df, labels_df def basic_user_features_transform(train_data, train_labels=None): data = train_data[['event_id', 'game_session', 'timestamp', 'installation_id', 'event_count', 'event_code', 'game_time', 'title', 'type', 'world']] data['ts'] = pd.to_datetime(data.timestamp) if train_labels is not None: train_w_labels = data.merge(train_labels, on='installation_id') groups = train_w_labels.groupby(['installation_id', 'game_session_y', 'title_y']) else: groups = data.groupby(['installation_id']) # game session y is index 1 of the group name # passing none to game session is for eval data, does not subset any of the data for each installation_id print('**getting user features before each training assessment**') features = applyParallel(groups, lambda name, group: summarize_events_before_game_session(name, group)) expanded_features = expand_count_features(features) if train_labels is not None: return split_features_and_labels(expanded_features) else: return expanded_features, None def get_data_processing_pipe(feats, log_features, categorical_features): # We create the preprocessing pipelines for both numeric and categorical data. 
numeric_features = [c for c in feats.columns if c not in log_features+categorical_features+['installation_id']] numeric_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(fill_value=0, strategy='constant')), ('scaler', StandardScaler())]) numeric_log_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(fill_value=0, strategy='constant')), ('log_scale', FunctionTransformer(np.log1p)), ('scaler', StandardScaler())]) categorical_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(strategy='constant', fill_value='missing')), ('onehot', OneHotEncoder(handle_unknown='ignore'))]) preprocessor = ColumnTransformer( remainder='drop', transformers=[ ('num', numeric_transformer, numeric_features), ('num_log', numeric_log_transformer, log_features), ('cat', categorical_transformer, categorical_features)]) return preprocessor class OrdinalRegressor: def __init__(self, clf, **kwargs): self.clf = clf(**kwargs) self.threshold_optimizer = OptimizedRounder([0, 1, 2, 3]) def fit(self, X, y, **fit_params): self.clf.fit(X, y, **fit_params) self.threshold_optimizer.fit(self.predict(X), y) def predict(self, X, **predict_params): pred = self.clf.predict(X) if predict_params.get('classify'): return self.classify(pred) return pred def set_params(self, **kwargs): self.clf = self.clf.set_params(**kwargs) def classify(self, pred): return self.threshold_optimizer.predict(pred) def predict_and_classify(self, X): return self.classify(self.predict(X)) def predict_proba(self, X): return self.predict_and_classify(X) class OptimizedRounder(object): def __init__(self, labels): self.coef_ = 0 self.labels = labels def _kappa_loss(self, coef, X, y): if len(set(coef)) != len(coef): return 0 preds = pd.cut(X, [-np.inf] + list(np.sort(coef)) + [np.inf], labels=self.labels) return -cohen_kappa_score(y, preds, weights='quadratic') def fit(self, X, y): loss_partial = partial(self._kappa_loss, X=X, y=y) initial_coef = [0.5, 1.5, 2.5] constraints = ({'type': 'ineq', 'fun': lambda x: x[1] - x[0] - 0.001}, {'type': 'ineq', 'fun': lambda x: x[2] - x[1] - 0.001}) self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='COBYLA', constraints=constraints) def predict(self, X, coef=None): coef = coef if coef else self.coefficients() preds = pd.cut(X, [-np.inf] + list(np.sort(coef)) + [np.inf], labels=self.labels) return preds def coefficients(self): return self.coef_['x'] feats = installation_features = pd.read_csv('../input/dsb-feats-v2/installation_features_v2.csv') labels = installation_labels = pd.read_csv('../input/dsb-feats-v2/installation_labels_v2.csv') test = pd.read_csv('../input/data-science-bowl-2019/test.csv') print(feats.shape) print(labels.shape) print(test.shape)
code
32071198/cell_1
[ "text_plain_output_1.png" ]
import os
import pandas as pd
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder, FunctionTransformer
from joblib import Parallel, delayed
import multiprocessing
from sklearn.base import clone
from collections import Counter
from functools import partial
from sklearn.metrics import cohen_kappa_score
import scipy as sp

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129032013/cell_4
[ "image_output_1.png" ]
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

dir = '/kaggle/input/the-movies-dataset/'
ratings = pd.read_csv(dir + 'ratings_small.csv')
metadata = pd.read_csv(dir + 'movies_metadata.csv', low_memory=False)
metadata = metadata.rename(columns={'id': 'movieId'})
metadata['movieId'] = pd.to_numeric(metadata['movieId'], errors='coerce')
combined_data = pd.merge(ratings, metadata, on='movieId')
df_grouped = combined_data.groupby(['movieId', 'userId'])['rating'].mean().reset_index()
wide_df = df_grouped.pivot(index='userId', columns='movieId', values='rating')
df_clean = wide_df.dropna(thresh=20, axis=1)
df_clean

combined_data['release_date'] = pd.to_datetime(combined_data['release_date'], errors='coerce')

def parse_genres(genres_str):
    genres = json.loads(genres_str.replace("'", '"'))
    genres_list = [g['name'] for g in genres]
    return genres_list

combined_data['genres'] = combined_data['genres'].apply(parse_genres)
combined_data['release_year'] = combined_data['release_date'].dt.year

plt.figure(figsize=(10, 5))
sns.histplot(combined_data['rating'], bins=20, kde=False)
plt.title('Distribution of Ratings')
plt.xlabel('Rating')
plt.ylabel('Count')
plt.show()
code
129032013/cell_2
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

dir = '/kaggle/input/the-movies-dataset/'
ratings = pd.read_csv(dir + 'ratings_small.csv')
metadata = pd.read_csv(dir + 'movies_metadata.csv', low_memory=False)
metadata = metadata.rename(columns={'id': 'movieId'})
metadata['movieId'] = pd.to_numeric(metadata['movieId'], errors='coerce')
combined_data = pd.merge(ratings, metadata, on='movieId')
df_grouped = combined_data.groupby(['movieId', 'userId'])['rating'].mean().reset_index()
wide_df = df_grouped.pivot(index='userId', columns='movieId', values='rating')
df_clean = wide_df.dropna(thresh=20, axis=1)
df_clean
code
129032013/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

dir = '/kaggle/input/the-movies-dataset/'
ratings = pd.read_csv(dir + 'ratings_small.csv')
metadata = pd.read_csv(dir + 'movies_metadata.csv', low_memory=False)
code
129032013/cell_7
[ "image_output_1.png" ]
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

dir = '/kaggle/input/the-movies-dataset/'
ratings = pd.read_csv(dir + 'ratings_small.csv')
metadata = pd.read_csv(dir + 'movies_metadata.csv', low_memory=False)
metadata = metadata.rename(columns={'id': 'movieId'})
metadata['movieId'] = pd.to_numeric(metadata['movieId'], errors='coerce')
combined_data = pd.merge(ratings, metadata, on='movieId')
df_grouped = combined_data.groupby(['movieId', 'userId'])['rating'].mean().reset_index()
wide_df = df_grouped.pivot(index='userId', columns='movieId', values='rating')
df_clean = wide_df.dropna(thresh=20, axis=1)
df_clean

combined_data['release_date'] = pd.to_datetime(combined_data['release_date'], errors='coerce')

def parse_genres(genres_str):
    genres = json.loads(genres_str.replace("'", '"'))
    genres_list = [g['name'] for g in genres]
    return genres_list

combined_data['genres'] = combined_data['genres'].apply(parse_genres)
combined_data['release_year'] = combined_data['release_date'].dt.year

all_genres = np.concatenate(combined_data['genres'].values)
unique_genres, counts = np.unique(all_genres, return_counts=True)
genre_counts = pd.DataFrame({'genre': unique_genres, 'count': counts}).sort_values(by='count', ascending=False)

plt.figure(figsize=(10, 5))
sns.barplot(x='genre', y='count', data=genre_counts)
plt.title('Distribution of Genres')
plt.xlabel('Genre')
plt.ylabel('Count')
plt.xticks(rotation=90)
plt.show()
code
129032013/cell_8
[ "image_output_1.png" ]
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

dir = '/kaggle/input/the-movies-dataset/'
ratings = pd.read_csv(dir + 'ratings_small.csv')
metadata = pd.read_csv(dir + 'movies_metadata.csv', low_memory=False)
metadata = metadata.rename(columns={'id': 'movieId'})
metadata['movieId'] = pd.to_numeric(metadata['movieId'], errors='coerce')
combined_data = pd.merge(ratings, metadata, on='movieId')
df_grouped = combined_data.groupby(['movieId', 'userId'])['rating'].mean().reset_index()
wide_df = df_grouped.pivot(index='userId', columns='movieId', values='rating')
df_clean = wide_df.dropna(thresh=20, axis=1)
df_clean

combined_data['release_date'] = pd.to_datetime(combined_data['release_date'], errors='coerce')

def parse_genres(genres_str):
    genres = json.loads(genres_str.replace("'", '"'))
    genres_list = [g['name'] for g in genres]
    return genres_list

combined_data['genres'] = combined_data['genres'].apply(parse_genres)
combined_data['release_year'] = combined_data['release_date'].dt.year

all_genres = np.concatenate(combined_data['genres'].values)
unique_genres, counts = np.unique(all_genres, return_counts=True)
genre_counts = pd.DataFrame({'genre': unique_genres, 'count': counts}).sort_values(by='count', ascending=False)
plt.xticks(rotation=90)

plt.figure(figsize=(10, 5))
sns.countplot(data=combined_data, x='original_language')
plt.title('Distribution of Original Language')
plt.xlabel('Language')
plt.ylabel('Count')
plt.xticks(rotation=90)
plt.show()
code
129032013/cell_5
[ "image_output_1.png" ]
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

dir = '/kaggle/input/the-movies-dataset/'
ratings = pd.read_csv(dir + 'ratings_small.csv')
metadata = pd.read_csv(dir + 'movies_metadata.csv', low_memory=False)
metadata = metadata.rename(columns={'id': 'movieId'})
metadata['movieId'] = pd.to_numeric(metadata['movieId'], errors='coerce')
combined_data = pd.merge(ratings, metadata, on='movieId')
df_grouped = combined_data.groupby(['movieId', 'userId'])['rating'].mean().reset_index()
wide_df = df_grouped.pivot(index='userId', columns='movieId', values='rating')
df_clean = wide_df.dropna(thresh=20, axis=1)
df_clean

combined_data['release_date'] = pd.to_datetime(combined_data['release_date'], errors='coerce')

def parse_genres(genres_str):
    genres = json.loads(genres_str.replace("'", '"'))
    genres_list = [g['name'] for g in genres]
    return genres_list

combined_data['genres'] = combined_data['genres'].apply(parse_genres)
combined_data['release_year'] = combined_data['release_date'].dt.year

plt.figure(figsize=(10, 5))
sns.histplot(combined_data['release_year'], bins=np.arange(1900, 2023, 1), kde=False)
plt.title('Distribution of Movie Release Years')
plt.xlabel('Release Year')
plt.ylabel('Count')
plt.show()
code
49126539/cell_9
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df.shape
null_in_train_csv = df.isnull().sum()
null_in_train_csv = null_in_train_csv[null_in_train_csv > 0]
null_in_train_csv.sort_values(inplace=True)
data = df.drop(['Alley', 'FireplaceQu', 'PoolQC', 'MiscFeature', 'Fence'], axis=1)
data.shape
arr_train_cor = data.corr()['SalePrice']
idx_train_cor_gt0 = arr_train_cor[arr_train_cor > 0].sort_values(ascending=False).index.tolist()
d1 = idx_train_cor_gt0
print(d1)
code
49126539/cell_4
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df.shape
null_in_train_csv = df.isnull().sum()
null_in_train_csv = null_in_train_csv[null_in_train_csv > 0]
null_in_train_csv.sort_values(inplace=True)
sns.heatmap(df.corr(), annot=True)
plt.show()
code
49126539/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df.shape
null_in_train_csv = df.isnull().sum()
null_in_train_csv = null_in_train_csv[null_in_train_csv > 0]
null_in_train_csv.sort_values(inplace=True)
data = df.drop(['Alley', 'FireplaceQu', 'PoolQC', 'MiscFeature', 'Fence'], axis=1)
data.shape
code
49126539/cell_2
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df.info()
df.shape
code
49126539/cell_11
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df.shape
null_in_train_csv = df.isnull().sum()
null_in_train_csv = null_in_train_csv[null_in_train_csv > 0]
null_in_train_csv.sort_values(inplace=True)
data = df.drop(['Alley', 'FireplaceQu', 'PoolQC', 'MiscFeature', 'Fence'], axis=1)
data.shape
arr_train_cor = data.corr()['SalePrice']
idx_train_cor_gt0 = arr_train_cor[arr_train_cor > 0].sort_values(ascending=False).index.tolist()
d1 = idx_train_cor_gt0
data1 = data[d1]
data1.shape
data1.head()
code
49126539/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from scipy.stats import norm
from scipy.stats import binned_statistic
import warnings

warnings.filterwarnings('ignore')
code
49126539/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df.shape
null_in_train_csv = df.isnull().sum()
null_in_train_csv = null_in_train_csv[null_in_train_csv > 0]
null_in_train_csv.sort_values(inplace=True)
data = df.drop(['Alley', 'FireplaceQu', 'PoolQC', 'MiscFeature', 'Fence'], axis=1)
data.shape
sns.heatmap(data.corr(), vmax=0.8, square=True)
code
49126539/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df.shape
null_in_train_csv = df.isnull().sum()
null_in_train_csv = null_in_train_csv[null_in_train_csv > 0]
null_in_train_csv.sort_values(inplace=True)
data = df.drop(['Alley', 'FireplaceQu', 'PoolQC', 'MiscFeature', 'Fence'], axis=1)
data.shape
arr_train_cor = data.corr()['SalePrice']
idx_train_cor_gt0 = arr_train_cor[arr_train_cor > 0].sort_values(ascending=False).index.tolist()
print(len(idx_train_cor_gt0))
code
49126539/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df.shape
null_in_train_csv = df.isnull().sum()
null_in_train_csv = null_in_train_csv[null_in_train_csv > 0]
null_in_train_csv.sort_values(inplace=True)
null_in_train_csv.plot.bar()
code
49126539/cell_10
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df.shape
null_in_train_csv = df.isnull().sum()
null_in_train_csv = null_in_train_csv[null_in_train_csv > 0]
null_in_train_csv.sort_values(inplace=True)
data = df.drop(['Alley', 'FireplaceQu', 'PoolQC', 'MiscFeature', 'Fence'], axis=1)
data.shape
arr_train_cor = data.corr()['SalePrice']
idx_train_cor_gt0 = arr_train_cor[arr_train_cor > 0].sort_values(ascending=False).index.tolist()
d1 = idx_train_cor_gt0
data1 = data[d1]
data1.shape
data1.info()
code
333996/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import warnings

warnings.filterwarnings('ignore')
fm = pd.read_csv('../input/ForumMessages.csv')
fm.info()
code
333996/cell_7
[ "image_output_1.png" ]
from collections import defaultdict import matplotlib.pyplot as plt import numpy as np import numpy as np import pandas as pd import pandas as pd import re import warnings import warnings import pandas as pd import warnings warnings.filterwarnings('ignore') fm = pd.read_csv('../input/ForumMessages.csv') import pandas as pd import warnings warnings.filterwarnings('ignore') fm = pd.read_csv('../input/ForumMessages.csv') rep_ids = fm['ReplyToForumMessageId'].unique().tolist() rep_fm = fm[fm['Id'].isin(rep_ids)] nb_replies = fm['ReplyToForumMessageId'].value_counts() rep_fm['NbReplies'] = rep_fm['Id'].map(nb_replies) import re def words_wo_quotes(mes): mes_wo_q = re.sub('\\[quote=.*\\[/quote\\]', '', mes, flags=re.DOTALL) return set([w for w in re.split('[^a-z]', mes_wo_q.lower()) if len(w) >= 2]) rep_fm['Message'] = rep_fm['Message'].astype(str).apply(words_wo_quotes) from collections import defaultdict dict_rep = defaultdict(int) def fill_dict_rep(x): for w in x[0]: dict_rep[w] += x[1] rep_fm[['Message', 'NbReplies']].apply(fill_dict_rep, axis=1) all_messages = fm['Message'].astype(str).apply(words_wo_quotes) dict_all = dict.fromkeys(dict_rep.keys(), 0) def fill_dict_all(x): for w in x: if w in dict_rep: dict_all[w] += 1 all_messages.apply(fill_dict_all) min_proportion = fm.shape[0] / 100 lwords = [] for w in dict_all: if dict_all[w] > min_proportion: lwords.append([w, dict_rep[w] / dict_all[w]]) lwords = sorted(lwords, key=lambda h: h[1], reverse=True) import matplotlib.pyplot as plt import numpy as np plt.rcParams['figure.figsize'] = (10, 12) x_axis = [wx[1] for wx in lwords[:20]] y_axis = [wx[0] for wx in lwords[:20]] plt.barh(range(60, 0, -3), [1] * 20, height=1.5, alpha=0.4) plt.barh(range(60, 0, -3), x_axis, height=1.5, alpha=0.4) plt.yticks(np.arange(60.5, 0.5, -3), y_axis, fontsize=20) fm = pd.read_csv('../input/ForumMessages.csv') fm = fm[['Message', 'Score']] fm['Message'] = fm['Message'].astype(str).apply(words_wo_quotes) score_dict = defaultdict(int) count_dict = defaultdict(int) def fill_dict(x): for w in x[0]: score_dict[w] += x[1] count_dict[w] += 1 fm.apply(fill_dict, axis=1) lwords = [] for de in score_dict: if count_dict[de] > min_proportion: lwords.append([de, score_dict[de] / count_dict[de]]) lwords = sorted(lwords, key=lambda h: h[1], reverse=True) plt.rcParams['figure.figsize'] = (10, 12) x_axis = [wx[1] for wx in lwords[:20]] y_axis = [wx[0] for wx in lwords[:20]] plt.barh(range(60, 0, -3), x_axis, height=1.5, alpha=0.6) plt.yticks(np.arange(60.5, 0.5, -3), y_axis, fontsize=20) plt.xlabel('Average score', fontsize=16) plt.ylabel('Word used', fontsize=16) plt.title('The 20 best words to use to have a good score', fontsize=22) plt.show()
code
333996/cell_5
[ "image_output_1.png" ]
from collections import defaultdict import matplotlib.pyplot as plt import numpy as np import numpy as np import pandas as pd import pandas as pd import re import warnings import warnings import pandas as pd import warnings warnings.filterwarnings('ignore') fm = pd.read_csv('../input/ForumMessages.csv') import pandas as pd import warnings warnings.filterwarnings('ignore') fm = pd.read_csv('../input/ForumMessages.csv') rep_ids = fm['ReplyToForumMessageId'].unique().tolist() rep_fm = fm[fm['Id'].isin(rep_ids)] nb_replies = fm['ReplyToForumMessageId'].value_counts() rep_fm['NbReplies'] = rep_fm['Id'].map(nb_replies) import re def words_wo_quotes(mes): mes_wo_q = re.sub('\\[quote=.*\\[/quote\\]', '', mes, flags=re.DOTALL) return set([w for w in re.split('[^a-z]', mes_wo_q.lower()) if len(w) >= 2]) rep_fm['Message'] = rep_fm['Message'].astype(str).apply(words_wo_quotes) from collections import defaultdict dict_rep = defaultdict(int) def fill_dict_rep(x): for w in x[0]: dict_rep[w] += x[1] rep_fm[['Message', 'NbReplies']].apply(fill_dict_rep, axis=1) all_messages = fm['Message'].astype(str).apply(words_wo_quotes) dict_all = dict.fromkeys(dict_rep.keys(), 0) def fill_dict_all(x): for w in x: if w in dict_rep: dict_all[w] += 1 all_messages.apply(fill_dict_all) min_proportion = fm.shape[0] / 100 lwords = [] for w in dict_all: if dict_all[w] > min_proportion: lwords.append([w, dict_rep[w] / dict_all[w]]) lwords = sorted(lwords, key=lambda h: h[1], reverse=True) import matplotlib.pyplot as plt import numpy as np plt.rcParams['figure.figsize'] = (10, 12) x_axis = [wx[1] for wx in lwords[:20]] y_axis = [wx[0] for wx in lwords[:20]] plt.barh(range(60, 0, -3), [1] * 20, height=1.5, alpha=0.4) plt.barh(range(60, 0, -3), x_axis, height=1.5, alpha=0.4) plt.yticks(np.arange(60.5, 0.5, -3), y_axis, fontsize=20) plt.xlabel('Proportion of replies', fontsize=16) plt.ylabel('Word used', fontsize=16) plt.title('The 20 best words to use to get replies', fontsize=22) plt.show()
code
105194092/cell_21
[ "text_plain_output_1.png" ]
import gc import numpy as np import pandas as pd import pickle def correlation_score(y_true, y_pred): """Scores the predictions according to the competition rules. It is assumed that the predictions are not constant. Returns the average of each sample's Pearson correlation coefficient""" if type(y_true) == pd.DataFrame: y_true = y_true.values if type(y_pred) == pd.DataFrame: y_pred = y_pred.values if y_true.shape != y_pred.shape: raise ValueError('Shapes are different.') corrsum = 0 for i in range(len(y_true)): corrsum += np.corrcoef(y_true[i], y_pred[i])[1, 0] return corrsum / len(y_true) def load(fname): with open(fname, 'rb') as f: return pickle.load(f) pca = load('../input/fork-of-msci-multiome-randomsampling-sp-6b182b/pca.pkl') pca2 = load('../input/fork-of-msci-multiome-randomsampling-sp-6b182b/pca2.pkl') n = 1 test_len = multi_test_x.shape[0] d = test_len // n x = [] for i in range(n): x.append(multi_test_x[i * d:i * d + d]) del multi_test_x gc.collect() index = 2 from catboost import CatBoostRegressor preds = np.zeros((test_len, 23418), dtype='float16') for i, xx in enumerate(x): for ind in range(index): with open(f'../input/fork-of-msci-multiome-randomsampling-sp-6b182b/model{ind:02}.pkl', 'rb') as file: model = pickle.load(file) preds[i * d:i * d + d, :] += model.predict(xx) @ pca2.components_ / index gc.collect() del xx gc.collect() del x gc.collect() del eval_ids_cell_num, eval_ids_gene_num, valid_multi_rows, eval_ids, test_index, y_columns gc.collect()
code
105194092/cell_13
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
eval_ids = pd.read_parquet('../input/multimodal-single-cell-as-sparse-matrix/evaluation.parquet')
eval_ids.cell_id = eval_ids.cell_id.astype(pd.CategoricalDtype())
eval_ids.gene_id = eval_ids.gene_id.astype(pd.CategoricalDtype())
code
105194092/cell_29
[ "text_plain_output_1.png" ]
import gc import numpy as np import pandas as pd import pickle def correlation_score(y_true, y_pred): """Scores the predictions according to the competition rules. It is assumed that the predictions are not constant. Returns the average of each sample's Pearson correlation coefficient""" if type(y_true) == pd.DataFrame: y_true = y_true.values if type(y_pred) == pd.DataFrame: y_pred = y_pred.values if y_true.shape != y_pred.shape: raise ValueError('Shapes are different.') corrsum = 0 for i in range(len(y_true)): corrsum += np.corrcoef(y_true[i], y_pred[i])[1, 0] return corrsum / len(y_true) def load(fname): with open(fname, 'rb') as f: return pickle.load(f) pca = load('../input/fork-of-msci-multiome-randomsampling-sp-6b182b/pca.pkl') pca2 = load('../input/fork-of-msci-multiome-randomsampling-sp-6b182b/pca2.pkl') n = 1 test_len = multi_test_x.shape[0] d = test_len // n x = [] for i in range(n): x.append(multi_test_x[i * d:i * d + d]) del multi_test_x gc.collect() index = 2 from catboost import CatBoostRegressor preds = np.zeros((test_len, 23418), dtype='float16') for i, xx in enumerate(x): for ind in range(index): with open(f'../input/fork-of-msci-multiome-randomsampling-sp-6b182b/model{ind:02}.pkl', 'rb') as file: model = pickle.load(file) preds[i * d:i * d + d, :] += model.predict(xx) @ pca2.components_ / index gc.collect() del xx gc.collect() submission = pd.Series(name='target', index=pd.MultiIndex.from_frame(eval_ids), dtype=np.float32) submission cell_dict = dict(((k, v) for v, k in enumerate(test_index))) assert len(cell_dict) == len(test_index) gene_dict = dict(((k, v) for v, k in enumerate(y_columns))) assert len(gene_dict) == len(y_columns) eval_ids_cell_num = eval_ids.cell_id.apply(lambda x: cell_dict.get(x, -1)) eval_ids_gene_num = eval_ids.gene_id.apply(lambda x: gene_dict.get(x, -1)) valid_multi_rows = (eval_ids_gene_num != -1) & (eval_ids_cell_num != -1) submission.iloc[valid_multi_rows] = preds[eval_ids_cell_num[valid_multi_rows].to_numpy(), eval_ids_gene_num[valid_multi_rows].to_numpy()] submission.reset_index(drop=True, inplace=True) submission.index.name = 'row_id' cite_submission = pd.read_csv('../input/msci-citeseq-keras-quickstart/submission.csv') cite_submission = cite_submission.set_index('row_id') cite_submission = cite_submission['target'] submission[submission.isnull()] = cite_submission[submission.isnull()] submission.isnull().any()
code
105194092/cell_11
[ "text_plain_output_1.png" ]
import gc import numpy as np import pandas as pd import pickle def correlation_score(y_true, y_pred): """Scores the predictions according to the competition rules. It is assumed that the predictions are not constant. Returns the average of each sample's Pearson correlation coefficient""" if type(y_true) == pd.DataFrame: y_true = y_true.values if type(y_pred) == pd.DataFrame: y_pred = y_pred.values if y_true.shape != y_pred.shape: raise ValueError('Shapes are different.') corrsum = 0 for i in range(len(y_true)): corrsum += np.corrcoef(y_true[i], y_pred[i])[1, 0] return corrsum / len(y_true) def load(fname): with open(fname, 'rb') as f: return pickle.load(f) pca = load('../input/fork-of-msci-multiome-randomsampling-sp-6b182b/pca.pkl') pca2 = load('../input/fork-of-msci-multiome-randomsampling-sp-6b182b/pca2.pkl') n = 1 test_len = multi_test_x.shape[0] d = test_len // n x = [] for i in range(n): x.append(multi_test_x[i * d:i * d + d]) del multi_test_x gc.collect() index = 2 from catboost import CatBoostRegressor preds = np.zeros((test_len, 23418), dtype='float16') for i, xx in enumerate(x): for ind in range(index): with open(f'../input/fork-of-msci-multiome-randomsampling-sp-6b182b/model{ind:02}.pkl', 'rb') as file: model = pickle.load(file) preds[i * d:i * d + d, :] += model.predict(xx) @ pca2.components_ / index gc.collect() del xx gc.collect() del x gc.collect()
code
105194092/cell_7
[ "text_plain_output_1.png" ]
multi_test_x = scipy.sparse.load_npz('../input/multimodal-single-cell-as-sparse-matrix/test_multi_inputs_values.sparse.npz')
multi_test_x = pca.transform(multi_test_x)
code
105194092/cell_32
[ "text_plain_output_1.png" ]
!head submission.csv
code
105194092/cell_28
[ "text_plain_output_1.png" ]
import gc import numpy as np import pandas as pd import pickle def correlation_score(y_true, y_pred): """Scores the predictions according to the competition rules. It is assumed that the predictions are not constant. Returns the average of each sample's Pearson correlation coefficient""" if type(y_true) == pd.DataFrame: y_true = y_true.values if type(y_pred) == pd.DataFrame: y_pred = y_pred.values if y_true.shape != y_pred.shape: raise ValueError('Shapes are different.') corrsum = 0 for i in range(len(y_true)): corrsum += np.corrcoef(y_true[i], y_pred[i])[1, 0] return corrsum / len(y_true) def load(fname): with open(fname, 'rb') as f: return pickle.load(f) pca = load('../input/fork-of-msci-multiome-randomsampling-sp-6b182b/pca.pkl') pca2 = load('../input/fork-of-msci-multiome-randomsampling-sp-6b182b/pca2.pkl') n = 1 test_len = multi_test_x.shape[0] d = test_len // n x = [] for i in range(n): x.append(multi_test_x[i * d:i * d + d]) del multi_test_x gc.collect() index = 2 from catboost import CatBoostRegressor preds = np.zeros((test_len, 23418), dtype='float16') for i, xx in enumerate(x): for ind in range(index): with open(f'../input/fork-of-msci-multiome-randomsampling-sp-6b182b/model{ind:02}.pkl', 'rb') as file: model = pickle.load(file) preds[i * d:i * d + d, :] += model.predict(xx) @ pca2.components_ / index gc.collect() del xx gc.collect() submission = pd.Series(name='target', index=pd.MultiIndex.from_frame(eval_ids), dtype=np.float32) submission cell_dict = dict(((k, v) for v, k in enumerate(test_index))) assert len(cell_dict) == len(test_index) gene_dict = dict(((k, v) for v, k in enumerate(y_columns))) assert len(gene_dict) == len(y_columns) eval_ids_cell_num = eval_ids.cell_id.apply(lambda x: cell_dict.get(x, -1)) eval_ids_gene_num = eval_ids.gene_id.apply(lambda x: gene_dict.get(x, -1)) valid_multi_rows = (eval_ids_gene_num != -1) & (eval_ids_cell_num != -1) submission.iloc[valid_multi_rows] = preds[eval_ids_cell_num[valid_multi_rows].to_numpy(), eval_ids_gene_num[valid_multi_rows].to_numpy()] submission.reset_index(drop=True, inplace=True) submission.index.name = 'row_id' cite_submission = pd.read_csv('../input/msci-citeseq-keras-quickstart/submission.csv') cite_submission = cite_submission.set_index('row_id') cite_submission = cite_submission['target'] submission[submission.isnull()] = cite_submission[submission.isnull()] submission
code
105194092/cell_8
[ "text_plain_output_1.png" ]
import gc
n = 1
test_len = multi_test_x.shape[0]
d = test_len // n
x = []
for i in range(n):
    x.append(multi_test_x[i * d:i * d + d])
del multi_test_x
gc.collect()
code
105194092/cell_16
[ "text_plain_output_1.png" ]
import numpy as np  # needed for np.load below
y_columns = np.load('../input/multimodal-single-cell-as-sparse-matrix/train_multi_targets_idxcol.npz', allow_pickle=True)['columns']
test_index = np.load('../input/multimodal-single-cell-as-sparse-matrix/test_multi_inputs_idxcol.npz', allow_pickle=True)['index']
code
105194092/cell_31
[ "text_plain_output_1.png" ]
!head submission.csv
code
105194092/cell_14
[ "text_plain_output_1.png" ]
import gc import numpy as np import pandas as pd import pickle def correlation_score(y_true, y_pred): """Scores the predictions according to the competition rules. It is assumed that the predictions are not constant. Returns the average of each sample's Pearson correlation coefficient""" if type(y_true) == pd.DataFrame: y_true = y_true.values if type(y_pred) == pd.DataFrame: y_pred = y_pred.values if y_true.shape != y_pred.shape: raise ValueError('Shapes are different.') corrsum = 0 for i in range(len(y_true)): corrsum += np.corrcoef(y_true[i], y_pred[i])[1, 0] return corrsum / len(y_true) def load(fname): with open(fname, 'rb') as f: return pickle.load(f) pca = load('../input/fork-of-msci-multiome-randomsampling-sp-6b182b/pca.pkl') pca2 = load('../input/fork-of-msci-multiome-randomsampling-sp-6b182b/pca2.pkl') n = 1 test_len = multi_test_x.shape[0] d = test_len // n x = [] for i in range(n): x.append(multi_test_x[i * d:i * d + d]) del multi_test_x gc.collect() index = 2 from catboost import CatBoostRegressor preds = np.zeros((test_len, 23418), dtype='float16') for i, xx in enumerate(x): for ind in range(index): with open(f'../input/fork-of-msci-multiome-randomsampling-sp-6b182b/model{ind:02}.pkl', 'rb') as file: model = pickle.load(file) preds[i * d:i * d + d, :] += model.predict(xx) @ pca2.components_ / index gc.collect() del xx gc.collect() submission = pd.Series(name='target', index=pd.MultiIndex.from_frame(eval_ids), dtype=np.float32) submission
code
105194092/cell_22
[ "text_plain_output_1.png" ]
import gc import numpy as np import pandas as pd import pickle def correlation_score(y_true, y_pred): """Scores the predictions according to the competition rules. It is assumed that the predictions are not constant. Returns the average of each sample's Pearson correlation coefficient""" if type(y_true) == pd.DataFrame: y_true = y_true.values if type(y_pred) == pd.DataFrame: y_pred = y_pred.values if y_true.shape != y_pred.shape: raise ValueError('Shapes are different.') corrsum = 0 for i in range(len(y_true)): corrsum += np.corrcoef(y_true[i], y_pred[i])[1, 0] return corrsum / len(y_true) def load(fname): with open(fname, 'rb') as f: return pickle.load(f) pca = load('../input/fork-of-msci-multiome-randomsampling-sp-6b182b/pca.pkl') pca2 = load('../input/fork-of-msci-multiome-randomsampling-sp-6b182b/pca2.pkl') n = 1 test_len = multi_test_x.shape[0] d = test_len // n x = [] for i in range(n): x.append(multi_test_x[i * d:i * d + d]) del multi_test_x gc.collect() index = 2 from catboost import CatBoostRegressor preds = np.zeros((test_len, 23418), dtype='float16') for i, xx in enumerate(x): for ind in range(index): with open(f'../input/fork-of-msci-multiome-randomsampling-sp-6b182b/model{ind:02}.pkl', 'rb') as file: model = pickle.load(file) preds[i * d:i * d + d, :] += model.predict(xx) @ pca2.components_ / index gc.collect() del xx gc.collect() submission = pd.Series(name='target', index=pd.MultiIndex.from_frame(eval_ids), dtype=np.float32) submission cell_dict = dict(((k, v) for v, k in enumerate(test_index))) assert len(cell_dict) == len(test_index) gene_dict = dict(((k, v) for v, k in enumerate(y_columns))) assert len(gene_dict) == len(y_columns) eval_ids_cell_num = eval_ids.cell_id.apply(lambda x: cell_dict.get(x, -1)) eval_ids_gene_num = eval_ids.gene_id.apply(lambda x: gene_dict.get(x, -1)) valid_multi_rows = (eval_ids_gene_num != -1) & (eval_ids_cell_num != -1) submission.iloc[valid_multi_rows] = preds[eval_ids_cell_num[valid_multi_rows].to_numpy(), eval_ids_gene_num[valid_multi_rows].to_numpy()] submission
code
105194092/cell_10
[ "text_plain_output_1.png" ]
import gc import numpy as np import pandas as pd import pickle def correlation_score(y_true, y_pred): """Scores the predictions according to the competition rules. It is assumed that the predictions are not constant. Returns the average of each sample's Pearson correlation coefficient""" if type(y_true) == pd.DataFrame: y_true = y_true.values if type(y_pred) == pd.DataFrame: y_pred = y_pred.values if y_true.shape != y_pred.shape: raise ValueError('Shapes are different.') corrsum = 0 for i in range(len(y_true)): corrsum += np.corrcoef(y_true[i], y_pred[i])[1, 0] return corrsum / len(y_true) def load(fname): with open(fname, 'rb') as f: return pickle.load(f) pca = load('../input/fork-of-msci-multiome-randomsampling-sp-6b182b/pca.pkl') pca2 = load('../input/fork-of-msci-multiome-randomsampling-sp-6b182b/pca2.pkl') n = 1 test_len = multi_test_x.shape[0] d = test_len // n x = [] for i in range(n): x.append(multi_test_x[i * d:i * d + d]) del multi_test_x gc.collect() index = 2 from catboost import CatBoostRegressor preds = np.zeros((test_len, 23418), dtype='float16') for i, xx in enumerate(x): for ind in range(index): print(ind, end=' ') with open(f'../input/fork-of-msci-multiome-randomsampling-sp-6b182b/model{ind:02}.pkl', 'rb') as file: model = pickle.load(file) preds[i * d:i * d + d, :] += model.predict(xx) @ pca2.components_ / index gc.collect() print('') del xx gc.collect()
code
105194092/cell_27
[ "text_plain_output_1.png" ]
import gc import numpy as np import pandas as pd import pickle def correlation_score(y_true, y_pred): """Scores the predictions according to the competition rules. It is assumed that the predictions are not constant. Returns the average of each sample's Pearson correlation coefficient""" if type(y_true) == pd.DataFrame: y_true = y_true.values if type(y_pred) == pd.DataFrame: y_pred = y_pred.values if y_true.shape != y_pred.shape: raise ValueError('Shapes are different.') corrsum = 0 for i in range(len(y_true)): corrsum += np.corrcoef(y_true[i], y_pred[i])[1, 0] return corrsum / len(y_true) def load(fname): with open(fname, 'rb') as f: return pickle.load(f) pca = load('../input/fork-of-msci-multiome-randomsampling-sp-6b182b/pca.pkl') pca2 = load('../input/fork-of-msci-multiome-randomsampling-sp-6b182b/pca2.pkl') n = 1 test_len = multi_test_x.shape[0] d = test_len // n x = [] for i in range(n): x.append(multi_test_x[i * d:i * d + d]) del multi_test_x gc.collect() index = 2 from catboost import CatBoostRegressor preds = np.zeros((test_len, 23418), dtype='float16') for i, xx in enumerate(x): for ind in range(index): with open(f'../input/fork-of-msci-multiome-randomsampling-sp-6b182b/model{ind:02}.pkl', 'rb') as file: model = pickle.load(file) preds[i * d:i * d + d, :] += model.predict(xx) @ pca2.components_ / index gc.collect() del xx gc.collect() submission = pd.Series(name='target', index=pd.MultiIndex.from_frame(eval_ids), dtype=np.float32) submission cell_dict = dict(((k, v) for v, k in enumerate(test_index))) assert len(cell_dict) == len(test_index) gene_dict = dict(((k, v) for v, k in enumerate(y_columns))) assert len(gene_dict) == len(y_columns) eval_ids_cell_num = eval_ids.cell_id.apply(lambda x: cell_dict.get(x, -1)) eval_ids_gene_num = eval_ids.gene_id.apply(lambda x: gene_dict.get(x, -1)) valid_multi_rows = (eval_ids_gene_num != -1) & (eval_ids_cell_num != -1) submission.iloc[valid_multi_rows] = preds[eval_ids_cell_num[valid_multi_rows].to_numpy(), eval_ids_gene_num[valid_multi_rows].to_numpy()] submission.reset_index(drop=True, inplace=True) submission.index.name = 'row_id' cite_submission = pd.read_csv('../input/msci-citeseq-keras-quickstart/submission.csv') cite_submission = cite_submission.set_index('row_id') cite_submission = cite_submission['target'] submission[submission.isnull()] = cite_submission[submission.isnull()] submission
code
105212890/cell_4
[ "text_plain_output_1.png" ]
n = 3
my_list = [2, 3, 4, 5, 6, 7, 8]
my_new_list = my_list[:n + 1]
print(my_new_list)
code
105212890/cell_6
[ "text_plain_output_1.png" ]
n = 3
my_list = [2, 3, 4, 5, 6, 7, 8]
my_new_list = my_list[:n + 1]
my_list = ['haha', 'one', 'two', 'serious']
first_item = my_list[0]
last_item = my_list[-1]
my_result = first_item == last_item
print(my_result)
code
105212890/cell_2
[ "text_plain_output_1.png" ]
number_1 = 60
number_2 = 50
my_product = number_1 * number_2
if my_product > 1000:
    print('Product:', my_product)
else:
    my_sum = number_1 + number_2
    print('Sum:', my_sum)
code
105212890/cell_8
[ "text_plain_output_1.png" ]
sampleDict = {'class': {'student': {'name': 'Mike', 'marks': {'physics': 70, 'history': 80}}}}
my_print = sampleDict['class']['student']['marks']['history']
print(my_print)
code
105212890/cell_10
[ "text_plain_output_1.png" ]
sample_dict = {'emp1': {'name': 'Javi', 'salary': 7500}, 'emp2': {'name': 'Laura', 'salary': 8000}, 'emp3': {'name': 'Dimitris', 'salary': 500}}
sample_dict['emp2']['salary'] = 0
print(sample_dict)
code
105212890/cell_12
[ "text_plain_output_1.png" ]
set1 = {20, 50, 4, 88, 15, 3}
set2 = {20, 40, 50, 15}
update = set1.intersection(set2)
set1 = update
print(set1)
code
89142931/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
from torch import nn
from torchvision.datasets import ImageFolder
from torchvision.models import resnet18
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from tqdm.auto import tqdm
import numpy as np
import sys
import torch
from torchvision.datasets import ImageFolder
from torchvision.transforms import Compose, Normalize, Resize, ToTensor

dataset = ImageFolder('/kaggle/input/the-office-characters/Office Dataset/', transform=Compose([Resize((224, 224)), ToTensor(), Normalize((0.5, 0.5, 0.5), (1, 1, 1))]))
train_set, test_set = torch.utils.data.random_split(dataset, [int(0.8 * len(dataset)), len(dataset) - int(0.8 * len(dataset))])
train_dataloader = torch.utils.data.DataLoader(train_set, batch_size=256, shuffle=True)
test_dataloader = torch.utils.data.DataLoader(test_set, batch_size=256, shuffle=False)

from torchvision.models import resnet18
model = resnet18(pretrained=True)
for param in model.parameters():
    param.requires_grad = False
model.fc = nn.Linear(512, 6)

def train_epoch(model, data_loader, optimizer, criterion, return_losses=False, device='cuda:0'):
    model = model.to(device).train()
    total_loss = 0
    num_batches = 0
    all_losses = []
    total_predictions = np.array([])
    total_labels = np.array([])
    with tqdm(total=len(data_loader), file=sys.stdout) as prbar:
        for images, labels in data_loader:
            images = images.to(device)
            labels = labels.to(device)
            predicted = model(images)
            loss = criterion(predicted, labels)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            accuracy = (predicted.argmax(1) == labels).float().mean()
            prbar.set_description(f'Loss: {round(loss.item(), 4)} Accuracy: {round(accuracy.item() * 100, 4)}')
            prbar.update(1)
            total_loss += loss.item()
            total_predictions = np.append(total_predictions, predicted.argmax(1).cpu().detach().numpy())
            total_labels = np.append(total_labels, labels.cpu().detach().numpy())
            num_batches += 1
            all_losses.append(loss.detach().item())
    metrics = {'loss': total_loss / num_batches}
    metrics.update({'accuracy': (total_predictions == total_labels).mean()})
    if return_losses:
        return (metrics, all_losses)
    else:
        return metrics

def validate(model, data_loader, criterion, device='cuda:0'):
    model = model.eval()
    total_loss = 0
    num_batches = 0
    total_predictions = np.array([])
    total_labels = np.array([])
    with tqdm(total=len(data_loader), file=sys.stdout) as prbar:
        for images, labels in data_loader:
            images = images.to(device)
            labels = labels.to(device)
            predicted = model(images)
            loss = criterion(predicted, labels)
            accuracy = (predicted.argmax(1) == labels).float().mean()
            prbar.set_description(f'Loss: {round(loss.item(), 4)} Accuracy: {round(accuracy.item() * 100, 4)}')
            prbar.update(1)
            total_loss += loss.item()
            total_predictions = np.append(total_predictions, predicted.argmax(1).cpu().detach().numpy())
            total_labels = np.append(total_labels, labels.cpu().detach().numpy())
            num_batches += 1
    metrics = {'loss': total_loss / num_batches}
    metrics.update({'accuracy': (total_predictions == total_labels).mean()})
    return metrics

def fit(model, epochs, train_data_loader, validation_data_loader, optimizer, criterion, device='cuda:0'):
    all_train_losses = []
    epoch_train_losses = []
    epoch_eval_losses = []
    for epoch in range(epochs):
        train_metrics, one_epoch_train_losses = train_epoch(model=model, data_loader=train_data_loader, optimizer=optimizer, return_losses=True, criterion=criterion, device=device)
        all_train_losses.extend(one_epoch_train_losses)
        epoch_train_losses.append(train_metrics['loss'])
        with torch.no_grad():
            validation_metrics = validate(model=model, data_loader=validation_data_loader, criterion=criterion)
        epoch_eval_losses.append(validation_metrics['loss'])

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.fc.parameters(), 0.0001)
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
fit(model, 30, train_dataloader, test_dataloader, optimizer, criterion, device=device)
code
89142931/cell_4
[ "text_plain_output_56.png", "text_plain_output_35.png", "text_plain_output_43.png", "text_plain_output_37.png", "text_plain_output_5.png", "text_plain_output_48.png", "text_plain_output_30.png", "text_plain_output_15.png", "text_plain_output_9.png", "text_plain_output_44.png", "text_plain_output_40.png", "text_plain_output_31.png", "text_plain_output_20.png", "text_plain_output_60.png", "text_plain_output_4.png", "text_plain_output_13.png", "text_plain_output_52.png", "text_plain_output_45.png", "text_plain_output_14.png", "text_plain_output_32.png", "text_plain_output_29.png", "text_plain_output_58.png", "text_plain_output_49.png", "text_plain_output_27.png", "text_plain_output_54.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_57.png", "text_plain_output_24.png", "text_plain_output_21.png", "text_plain_output_47.png", "text_plain_output_25.png", "text_plain_output_18.png", "text_plain_output_50.png", "text_plain_output_36.png", "text_plain_output_3.png", "text_plain_output_22.png", "text_plain_output_38.png", "text_plain_output_7.png", "text_plain_output_16.png", "text_plain_output_59.png", "text_plain_output_8.png", "text_plain_output_26.png", "text_plain_output_41.png", "text_plain_output_34.png", "text_plain_output_42.png", "text_plain_output_53.png", "text_plain_output_23.png", "text_plain_output_51.png", "text_plain_output_28.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_plain_output_33.png", "text_plain_output_39.png", "text_plain_output_55.png", "text_plain_output_19.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png", "text_plain_output_46.png" ]
from torch import nn
from torchvision.models import resnet18
from torchvision.models import resnet18
model = resnet18(pretrained=True)
for param in model.parameters():
    param.requires_grad = False
model.fc = nn.Linear(512, 6)
code
89142931/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from torch import nn
from torchvision.datasets import ImageFolder
from torchvision.models import resnet18
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from tqdm.auto import tqdm
import matplotlib.pyplot as plt
import numpy as np
import sys
import torch
from torchvision.datasets import ImageFolder
from torchvision.transforms import Compose, Normalize, Resize, ToTensor

dataset = ImageFolder('/kaggle/input/the-office-characters/Office Dataset/', transform=Compose([Resize((224, 224)), ToTensor(), Normalize((0.5, 0.5, 0.5), (1, 1, 1))]))
train_set, test_set = torch.utils.data.random_split(dataset, [int(0.8 * len(dataset)), len(dataset) - int(0.8 * len(dataset))])
train_dataloader = torch.utils.data.DataLoader(train_set, batch_size=256, shuffle=True)
test_dataloader = torch.utils.data.DataLoader(test_set, batch_size=256, shuffle=False)

from torchvision.models import resnet18
model = resnet18(pretrained=True)
for param in model.parameters():
    param.requires_grad = False
model.fc = nn.Linear(512, 6)

def train_epoch(model, data_loader, optimizer, criterion, return_losses=False, device='cuda:0'):
    model = model.to(device).train()
    total_loss = 0
    num_batches = 0
    all_losses = []
    total_predictions = np.array([])
    total_labels = np.array([])
    with tqdm(total=len(data_loader), file=sys.stdout) as prbar:
        for images, labels in data_loader:
            images = images.to(device)
            labels = labels.to(device)
            predicted = model(images)
            loss = criterion(predicted, labels)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            accuracy = (predicted.argmax(1) == labels).float().mean()
            prbar.set_description(f'Loss: {round(loss.item(), 4)} Accuracy: {round(accuracy.item() * 100, 4)}')
            prbar.update(1)
            total_loss += loss.item()
            total_predictions = np.append(total_predictions, predicted.argmax(1).cpu().detach().numpy())
            total_labels = np.append(total_labels, labels.cpu().detach().numpy())
            num_batches += 1
            all_losses.append(loss.detach().item())
    metrics = {'loss': total_loss / num_batches}
    metrics.update({'accuracy': (total_predictions == total_labels).mean()})
    if return_losses:
        return (metrics, all_losses)
    else:
        return metrics

def validate(model, data_loader, criterion, device='cuda:0'):
    model = model.eval()
    total_loss = 0
    num_batches = 0
    total_predictions = np.array([])
    total_labels = np.array([])
    with tqdm(total=len(data_loader), file=sys.stdout) as prbar:
        for images, labels in data_loader:
            images = images.to(device)
            labels = labels.to(device)
            predicted = model(images)
            loss = criterion(predicted, labels)
            accuracy = (predicted.argmax(1) == labels).float().mean()
            prbar.set_description(f'Loss: {round(loss.item(), 4)} Accuracy: {round(accuracy.item() * 100, 4)}')
            prbar.update(1)
            total_loss += loss.item()
            total_predictions = np.append(total_predictions, predicted.argmax(1).cpu().detach().numpy())
            total_labels = np.append(total_labels, labels.cpu().detach().numpy())
            num_batches += 1
    metrics = {'loss': total_loss / num_batches}
    metrics.update({'accuracy': (total_predictions == total_labels).mean()})
    return metrics

def fit(model, epochs, train_data_loader, validation_data_loader, optimizer, criterion, device='cuda:0'):
    all_train_losses = []
    epoch_train_losses = []
    epoch_eval_losses = []
    for epoch in range(epochs):
        train_metrics, one_epoch_train_losses = train_epoch(model=model, data_loader=train_data_loader, optimizer=optimizer, return_losses=True, criterion=criterion, device=device)
        all_train_losses.extend(one_epoch_train_losses)
        epoch_train_losses.append(train_metrics['loss'])
        with torch.no_grad():
            validation_metrics = validate(model=model, data_loader=validation_data_loader, criterion=criterion)
        epoch_eval_losses.append(validation_metrics['loss'])

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.fc.parameters(), 0.0001)
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
cl = 1
while cl == 1:
    i = np.random.randint(0, len(test_set))
    cl = test_set[i][1]
image = test_set[i][0]
image = image.to(device)
image.requires_grad = True
pred = model(image[None])
predicted_label = pred.argmax(1).item()
confidence = pred.softmax(1)[0][predicted_label]
d = {i: j for i, j in zip(range(6), ['Angela', 'Dwight', 'Jim', 'Kevin', 'Michael', 'Pam'])}
plt.title('%s, confidence = %0.4f' % (d[predicted_label], confidence.item()))
plt.imshow(image.cpu().detach().numpy().transpose((1, 2, 0)) + 0.5)
code
72086322/cell_21
[ "image_output_2.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/colorectal-cancer-patients/crc.txt', sep='\t')
df
df_gene = pd.read_csv('../input/colorectal-cancer-patients/crc_ge.txt', sep='\t')
df_gene = df_gene.transpose()
col_names = df_gene.iloc[0].tolist()
df_gene.columns = col_names
df_gene = df_gene.drop(axis=0, index='ID_REF')
df_gene.insert(loc=0, column='ID_REF', value=df_gene.index)
df_gene = df_gene.reset_index(drop=True)
features_gene_num = df_gene.columns.tolist()[1:]
df_gene[features_gene_num] = df_gene[features_gene_num].astype(float)
df_gene
code
72086322/cell_9
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd df = pd.read_csv('../input/colorectal-cancer-patients/crc.txt', sep='\t') df df = df.drop('Unnamed: 9', axis=1) df = df.drop('Unnamed: 10', axis=1) df = df.drop('Unnamed: 11', axis=1) df = df.drop(index=62, axis=0) df features_num = ['Age (in years)', 'DFS (in months)'] for f in features_num: fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 7)) ax1.hist(df[f]) ax1.grid() ax1.set_title('Histogram of ' + f) ax2.boxplot(df[f], vert=False) ax2.grid() ax2.set_title('Boxplot of ' + f) plt.show()
code
72086322/cell_25
[ "image_output_5.png", "image_output_4.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/colorectal-cancer-patients/crc.txt', sep='\t') df df = df.drop('Unnamed: 9', axis=1) df = df.drop('Unnamed: 10', axis=1) df = df.drop('Unnamed: 11', axis=1) df = df.drop(index=62, axis=0) df features_num = ['Age (in years)', 'DFS (in months)'] # plot distributions for f in features_num: fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10,7)) ax1.hist(df[f]) ax1.grid() ax1.set_title('Histogram of ' + f) ax2.boxplot(df[f], vert=False) ax2.grid() ax2.set_title('Boxplot of ' + f) plt.show() # correlations corr_pearson = df[features_num].corr(method='pearson') corr_spearman = df[features_num].corr(method='spearman') fig = plt.figure(figsize = (8,6)) sns.heatmap(corr_pearson, annot=True, cmap='RdYlGn', vmin=-1, vmax=+1) plt.title('Pearson Correlation') plt.show() fig = plt.figure(figsize = (8,6)) sns.heatmap(corr_spearman, annot=True, cmap='RdYlGn', vmin=-1, vmax=+1) plt.title('Spearman Correlation') plt.show() features_cat = ['Dukes Stage', 'Gender', 'Location', 'DFS event', 'Adj_Radio', 'Adj_Chem'] for f in features_cat: df[f].value_counts().sort_index().plot(kind='bar') df_gene = pd.read_csv('../input/colorectal-cancer-patients/crc_ge.txt', sep='\t') df_gene = df_gene.transpose() col_names = df_gene.iloc[0].tolist() df_gene.columns = col_names df_gene = df_gene.drop(axis=0, index='ID_REF') df_gene.insert(loc=0, column='ID_REF', value=df_gene.index) df_gene = df_gene.reset_index(drop=True) features_gene_num = df_gene.columns.tolist()[1:] df_gene[features_gene_num] = df_gene[features_gene_num].astype(float) for i in range(38): print('Columns', 50 * i + 1, 'to', 50 * i + 50) df_gene.iloc[:, 50 * i + 1:50 * i + 50 + 1].plot(kind='box', figsize=(15, 5)) plt.xticks(rotation=90) plt.grid() plt.show() print('Columns', 1901, 'to', 1935) df_gene.iloc[:, 1901:1935 + 1].plot(kind='box', figsize=(15, 5)) plt.xticks(rotation=90) plt.grid() plt.show()
code
72086322/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/colorectal-cancer-patients/crc.txt', sep='\t')
df
df = df.drop('Unnamed: 9', axis=1)
df = df.drop('Unnamed: 10', axis=1)
df = df.drop('Unnamed: 11', axis=1)
df = df.drop(index=62, axis=0)
df
code
72086322/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/colorectal-cancer-patients/crc.txt', sep='\t') df df = df.drop('Unnamed: 9', axis=1) df = df.drop('Unnamed: 10', axis=1) df = df.drop('Unnamed: 11', axis=1) df = df.drop(index=62, axis=0) df features_num = ['Age (in years)', 'DFS (in months)'] # plot distributions for f in features_num: fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10,7)) ax1.hist(df[f]) ax1.grid() ax1.set_title('Histogram of ' + f) ax2.boxplot(df[f], vert=False) ax2.grid() ax2.set_title('Boxplot of ' + f) plt.show() # correlations corr_pearson = df[features_num].corr(method='pearson') corr_spearman = df[features_num].corr(method='spearman') fig = plt.figure(figsize = (8,6)) sns.heatmap(corr_pearson, annot=True, cmap='RdYlGn', vmin=-1, vmax=+1) plt.title('Pearson Correlation') plt.show() fig = plt.figure(figsize = (8,6)) sns.heatmap(corr_spearman, annot=True, cmap='RdYlGn', vmin=-1, vmax=+1) plt.title('Spearman Correlation') plt.show() plt.scatter(df['Age (in years)'], df['DFS (in months)']) plt.title('DFS vs Age') plt.xlabel('Age') plt.ylabel('DFS (months)') plt.grid() plt.show()
code
72086322/cell_18
[ "image_output_2.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/colorectal-cancer-patients/crc.txt', sep='\t')
df
df_gene = pd.read_csv('../input/colorectal-cancer-patients/crc_ge.txt', sep='\t')
df_gene = df_gene.transpose()
df_gene.head()
code
72086322/cell_28
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/colorectal-cancer-patients/crc.txt', sep='\t') df df = df.drop('Unnamed: 9', axis=1) df = df.drop('Unnamed: 10', axis=1) df = df.drop('Unnamed: 11', axis=1) df = df.drop(index=62, axis=0) df features_num = ['Age (in years)', 'DFS (in months)'] # plot distributions for f in features_num: fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10,7)) ax1.hist(df[f]) ax1.grid() ax1.set_title('Histogram of ' + f) ax2.boxplot(df[f], vert=False) ax2.grid() ax2.set_title('Boxplot of ' + f) plt.show() # correlations corr_pearson = df[features_num].corr(method='pearson') corr_spearman = df[features_num].corr(method='spearman') fig = plt.figure(figsize = (8,6)) sns.heatmap(corr_pearson, annot=True, cmap='RdYlGn', vmin=-1, vmax=+1) plt.title('Pearson Correlation') plt.show() fig = plt.figure(figsize = (8,6)) sns.heatmap(corr_spearman, annot=True, cmap='RdYlGn', vmin=-1, vmax=+1) plt.title('Spearman Correlation') plt.show() features_cat = ['Dukes Stage', 'Gender', 'Location', 'DFS event', 'Adj_Radio', 'Adj_Chem'] for f in features_cat: df[f].value_counts().sort_index().plot(kind='bar') df_gene = pd.read_csv('../input/colorectal-cancer-patients/crc_ge.txt', sep='\t') df_gene = df_gene.transpose() col_names = df_gene.iloc[0].tolist() df_gene.columns = col_names df_gene = df_gene.drop(axis=0, index='ID_REF') df_gene.insert(loc=0, column='ID_REF', value=df_gene.index) df_gene = df_gene.reset_index(drop=True) features_gene_num = df_gene.columns.tolist()[1:] df_gene[features_gene_num] = df_gene[features_gene_num].astype(float) for i in range(38): plt.xticks(rotation=90) plt.xticks(rotation=90) df_combined = df.join(other=df_gene.set_index('ID_REF'), on='ID_REF', how='left') df_combined
code
72086322/cell_8
[ "image_output_11.png", "text_plain_output_35.png", "image_output_24.png", "text_plain_output_37.png", "image_output_25.png", "text_plain_output_5.png", "text_plain_output_30.png", "text_plain_output_15.png", "image_output_17.png", "image_output_30.png", "text_plain_output_9.png", "image_output_14.png", "image_output_39.png", "image_output_28.png", "text_plain_output_31.png", "text_plain_output_20.png", "image_output_23.png", "image_output_34.png", "text_plain_output_4.png", "text_plain_output_13.png", "image_output_13.png", "image_output_5.png", "text_plain_output_14.png", "image_output_18.png", "text_plain_output_32.png", "text_plain_output_29.png", "image_output_21.png", "text_plain_output_27.png", "text_plain_output_10.png", "text_plain_output_6.png", "image_output_7.png", "text_plain_output_24.png", "text_plain_output_21.png", "image_output_31.png", "text_plain_output_25.png", "image_output_20.png", "text_plain_output_18.png", "text_plain_output_36.png", "image_output_32.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_22.png", "image_output_35.png", "text_plain_output_38.png", "text_plain_output_7.png", "image_output_36.png", "image_output_8.png", "image_output_37.png", "text_plain_output_16.png", "image_output_16.png", "text_plain_output_8.png", "text_plain_output_26.png", "image_output_27.png", "image_output_6.png", "text_plain_output_34.png", "text_plain_output_23.png", "image_output_12.png", "text_plain_output_28.png", "image_output_22.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_plain_output_33.png", "text_plain_output_39.png", "image_output_3.png", "image_output_29.png", "text_plain_output_19.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png", "image_output_33.png", "image_output_15.png", "image_output_9.png", "image_output_19.png", "image_output_38.png", "image_output_26.png" ]
import pandas as pd
df = pd.read_csv('../input/colorectal-cancer-patients/crc.txt', sep='\t')
df
df = df.drop('Unnamed: 9', axis=1)
df = df.drop('Unnamed: 10', axis=1)
df = df.drop('Unnamed: 11', axis=1)
df = df.drop(index=62, axis=0)
df
features_num = ['Age (in years)', 'DFS (in months)']
df[features_num].describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9])
code
72086322/cell_16
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/colorectal-cancer-patients/crc.txt', sep='\t')
df
df_gene = pd.read_csv('../input/colorectal-cancer-patients/crc_ge.txt', sep='\t')
df_gene.head()
code
72086322/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/colorectal-cancer-patients/crc.txt', sep='\t')
df
code
72086322/cell_24
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/colorectal-cancer-patients/crc.txt', sep='\t')
df
df_gene = pd.read_csv('../input/colorectal-cancer-patients/crc_ge.txt', sep='\t')
df_gene = df_gene.transpose()
col_names = df_gene.iloc[0].tolist()
df_gene.columns = col_names
df_gene = df_gene.drop(axis=0, index='ID_REF')
df_gene.insert(loc=0, column='ID_REF', value=df_gene.index)
df_gene = df_gene.reset_index(drop=True)
features_gene_num = df_gene.columns.tolist()[1:]
df_gene[features_gene_num] = df_gene[features_gene_num].astype(float)
df_gene[features_gene_num].describe()
code
72086322/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/colorectal-cancer-patients/crc.txt', sep='\t') df df = df.drop('Unnamed: 9', axis=1) df = df.drop('Unnamed: 10', axis=1) df = df.drop('Unnamed: 11', axis=1) df = df.drop(index=62, axis=0) df features_num = ['Age (in years)', 'DFS (in months)'] # plot distributions for f in features_num: fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10,7)) ax1.hist(df[f]) ax1.grid() ax1.set_title('Histogram of ' + f) ax2.boxplot(df[f], vert=False) ax2.grid() ax2.set_title('Boxplot of ' + f) plt.show() # correlations corr_pearson = df[features_num].corr(method='pearson') corr_spearman = df[features_num].corr(method='spearman') fig = plt.figure(figsize = (8,6)) sns.heatmap(corr_pearson, annot=True, cmap='RdYlGn', vmin=-1, vmax=+1) plt.title('Pearson Correlation') plt.show() fig = plt.figure(figsize = (8,6)) sns.heatmap(corr_spearman, annot=True, cmap='RdYlGn', vmin=-1, vmax=+1) plt.title('Spearman Correlation') plt.show() features_cat = ['Dukes Stage', 'Gender', 'Location', 'DFS event', 'Adj_Radio', 'Adj_Chem'] for f in features_cat: df[f].value_counts().sort_index().plot(kind='bar') plt.title(f) plt.grid() plt.show()
code
72086322/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/colorectal-cancer-patients/crc.txt', sep='\t') df df = df.drop('Unnamed: 9', axis=1) df = df.drop('Unnamed: 10', axis=1) df = df.drop('Unnamed: 11', axis=1) df = df.drop(index=62, axis=0) df features_num = ['Age (in years)', 'DFS (in months)'] # plot distributions for f in features_num: fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10,7)) ax1.hist(df[f]) ax1.grid() ax1.set_title('Histogram of ' + f) ax2.boxplot(df[f], vert=False) ax2.grid() ax2.set_title('Boxplot of ' + f) plt.show() corr_pearson = df[features_num].corr(method='pearson') corr_spearman = df[features_num].corr(method='spearman') fig = plt.figure(figsize=(8, 6)) sns.heatmap(corr_pearson, annot=True, cmap='RdYlGn', vmin=-1, vmax=+1) plt.title('Pearson Correlation') plt.show() fig = plt.figure(figsize=(8, 6)) sns.heatmap(corr_spearman, annot=True, cmap='RdYlGn', vmin=-1, vmax=+1) plt.title('Spearman Correlation') plt.show()
code
72086322/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/colorectal-cancer-patients/crc.txt', sep='\t')
df
df = df.drop('Unnamed: 9', axis=1)
df = df.drop('Unnamed: 10', axis=1)
df = df.drop('Unnamed: 11', axis=1)
df = df.drop(index=62, axis=0)
df
df.info()
code
16146582/cell_6
[ "image_output_1.png" ]
from keras.layers import Dense from keras.models import Sequential import numpy as np import pandas as pd train_csv_df = pd.read_csv('../input/train.csv') test_csv_df = pd.read_csv('../input/test.csv') y_train = np.array(train_csv_df['label']) X_train = np.array(train_csv_df.drop('label', 1)) X_test = np.array(test_csv_df) count = 0 for index in range(0, 6): count += 1 y_train = np.eye(10)[y_train] X_test = X_test / 255 X_train = X_train / 255 model = Sequential() model.add(Dense(units=512, input_dim=784, activation='relu')) model.add(Dense(units=330, activation='relu')) model.add(Dense(units=212, activation='relu')) model.add(Dense(units=128, activation='relu')) model.add(Dense(units=152, activation='relu')) model.add(Dense(units=152, activation='relu')) model.add(Dense(units=10, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(X_train, y_train, epochs=50, batch_size=6000) predictions = model.predict(X_test) count = 0 for index in range(0, 6): plt.subplot(2, 3, count + 1) plt.title(np.argmax(predictions[index])) plt.imshow(X_test[index].reshape(28, 28), cmap='gray') count += 1 plt.show()
code
16146582/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
code
16146582/cell_3
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt  # needed for the plotting calls below
train_csv_df = pd.read_csv('../input/train.csv')
test_csv_df = pd.read_csv('../input/test.csv')
y_train = np.array(train_csv_df['label'])
X_train = np.array(train_csv_df.drop('label', 1))
X_test = np.array(test_csv_df)
count = 0
for index in range(0, 6):
    plt.subplot(2, 3, count + 1)
    plt.title(y_train[index])
    plt.imshow(X_train[index].reshape(28, 28), cmap='gray')
    count += 1
plt.show()
code
16146582/cell_5
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.layers import Dense
from keras.models import Sequential
import numpy as np
import pandas as pd
train_csv_df = pd.read_csv('../input/train.csv')
test_csv_df = pd.read_csv('../input/test.csv')
y_train = np.array(train_csv_df['label'])
X_train = np.array(train_csv_df.drop('label', 1))
X_test = np.array(test_csv_df)
y_train = np.eye(10)[y_train]
X_test = X_test / 255
X_train = X_train / 255
model = Sequential()
model.add(Dense(units=512, input_dim=784, activation='relu'))
model.add(Dense(units=330, activation='relu'))
model.add(Dense(units=212, activation='relu'))
model.add(Dense(units=128, activation='relu'))
model.add(Dense(units=152, activation='relu'))
model.add(Dense(units=152, activation='relu'))
model.add(Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=50, batch_size=6000)
code
90102656/cell_7
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import cv2
from matplotlib import pyplot as plt  # needed for the plotting calls below
face_model = cv2.CascadeClassifier('../input/haarcascades/haarcascade_frontalface_default.xml')
img = cv2.imread('../input/face-mask-detection/images/maksssksksss244.png')
img = cv2.cvtColor(img, cv2.IMREAD_GRAYSCALE)
faces = face_model.detectMultiScale(img, scaleFactor=1.1, minNeighbors=4)
out_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
for x, y, w, h in faces:
    cv2.rectangle(out_img, (x, y), (x + w, y + h), (0, 0, 255), 1)
plt.figure(figsize=(12, 12))
plt.imshow(out_img)
code
90102656/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator
train_dir = '../input/face-mask-12k-images-dataset/Face Mask Dataset/Train'
test_dir = '../input/face-mask-12k-images-dataset/Face Mask Dataset/Test'
val_dir = '../input/face-mask-12k-images-dataset/Face Mask Dataset/Validation'
train_datagen = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True, zoom_range=0.2, shear_range=0.2)
train_generator = train_datagen.flow_from_directory(directory=train_dir, target_size=(128, 128), class_mode='categorical', batch_size=32)
val_datagen = ImageDataGenerator(rescale=1.0 / 255)
val_generator = train_datagen.flow_from_directory(directory=val_dir, target_size=(128, 128), class_mode='categorical', batch_size=32)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_generator = train_datagen.flow_from_directory(directory=val_dir, target_size=(128, 128), class_mode='categorical', batch_size=32)
code
90102656/cell_3
[ "text_plain_output_1.png" ]
import os
print(os.listdir('../input'))
print(os.listdir('../working'))
code
90102656/cell_17
[ "image_output_1.png" ]
from keras import Sequential
from keras.applications.vgg19 import VGG19
from keras.layers import Flatten, Dense
vgg19 = VGG19(weights='imagenet', include_top=False, input_shape=(128, 128, 3))
for layer in vgg19.layers:
    layer.trainable = False
model = Sequential()
model.add(vgg19)
model.add(Flatten())
model.add(Dense(2, activation='sigmoid'))
model.summary()
code
90102656/cell_10
[ "text_plain_output_1.png" ]
from scipy.spatial import distance
import cv2
from matplotlib import pyplot as plt  # needed for the plotting calls below
face_model = cv2.CascadeClassifier('../input/haarcascades/haarcascade_frontalface_default.xml')
img = cv2.imread('../input/face-mask-detection/images/maksssksksss244.png')
img = cv2.cvtColor(img, cv2.IMREAD_GRAYSCALE)
faces = face_model.detectMultiScale(img, scaleFactor=1.1, minNeighbors=4)
out_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
for x, y, w, h in faces:
    cv2.rectangle(out_img, (x, y), (x + w, y + h), (0, 0, 255), 1)
MIN_DISTANCE = 130
if len(faces) >= 2:
    label = [0 for i in range(len(faces))]
    for i in range(len(faces) - 1):
        for j in range(i + 1, len(faces)):
            dist = distance.euclidean(faces[i][:2], faces[j][:2])
            if dist < MIN_DISTANCE:
                label[i] = 1
                label[j] = 1
    new_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    for i in range(len(faces)):
        x, y, w, h = faces[i]
        if label[i] == 1:
            cv2.rectangle(new_img, (x, y), (x + w, y + h), (255, 0, 0), 1)
        else:
            cv2.rectangle(new_img, (x, y), (x + w, y + h), (0, 255, 0), 1)
    plt.figure(figsize=(10, 10))
    plt.imshow(new_img)
else:
    print('No. of faces detected is less than 2')
code
89127402/cell_21
[ "text_plain_output_1.png" ]
from dateutil.relativedelta import relativedelta from statsmodels.tsa.stattools import adfuller import matplotlib.pyplot as plt import pandas as pd from dateutil.relativedelta import relativedelta # rolling averages and std def rolling_stat(timeseries, window_size): # Determing rolling statistics rolmean = timeseries.rolling(window = window_size).mean() rolstd = timeseries.rolling(window = window_size).std() # Plot rolling statistics: fig, ax = plt.subplots(figsize = (12, 4)) orig = plt.plot(timeseries, color = '#4DBEEE', label = 'Original') std = plt.plot(rolstd, color = 'black', label = 'Rolling Std') mean = plt.plot(rolmean, color = 'red', label = 'Rolling Mean') plt.legend(loc = 'best') plt.title('Rolling Mean and Standard Deviation') plt.grid() plt.show(block=False) # get n predictions for series by model def future_preds_df(model, series, num_steps): pred_first = series.index.max() + relativedelta(weeks = 1) pred_last = series.index.max() + relativedelta(weeks = num_steps) date_range_index = pd.date_range(pred_first, pred_last, freq = 'W') vals = model.predict(n_periods = num_steps) return pd.DataFrame(vals,index = date_range_index) # Augmented Dicky-Fuller Test def adf_test(timeseries): adf, pvalue, usedlag, nobs, critical_values, icbest = adfuller(timeseries) print("Test statistic: ", adf, 2) print("P-value: ", pvalue) print("Critical values: ", critical_values) # source: notebook 06f_DEMO_SARIMA_Prophet by IBM Specialized Models: Time Series and Survival Analysis course stations = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/station_list.csv', index_col=0) dublin_air_data = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/Stations/532_dublin_airport.csv', index_col=0, parse_dates=['date']) dublin_air_data.rename(columns={'ind': 'i_rain', 'ind.1': 'i_temp', 'ind.2': 'i_wetb', 'ind.3': 'i_wdsp', 'ind.4': 'i_wddir'}, inplace=True) dublin_air_data.set_index('date', inplace=True) dublin_air_data.head()
code
89127402/cell_13
[ "text_html_output_1.png" ]
import json
import json
import plotly.graph_objs as go
import urllib.request

def read_geojson(url):
    with urllib.request.urlopen(url) as url:
        jdata = json.loads(url.read().decode())
    return jdata

ireland_url = 'https://gist.githubusercontent.com/pnewall/9a122c05ba2865c3a58f15008548fbbd/raw/5bb4f84d918b871ee0e8b99f60dde976bb711d7c/ireland_counties.geojson'
jdata = read_geojson(ireland_url)
jdata['type']
print(jdata['features'][0].keys())
print(jdata['features'][0]['properties'])
code
89127402/cell_25
[ "text_html_output_1.png" ]
from dateutil.relativedelta import relativedelta from statsmodels.tsa.stattools import adfuller import matplotlib.pyplot as plt import pandas as pd from dateutil.relativedelta import relativedelta # rolling averages and std def rolling_stat(timeseries, window_size): # Determing rolling statistics rolmean = timeseries.rolling(window = window_size).mean() rolstd = timeseries.rolling(window = window_size).std() # Plot rolling statistics: fig, ax = plt.subplots(figsize = (12, 4)) orig = plt.plot(timeseries, color = '#4DBEEE', label = 'Original') std = plt.plot(rolstd, color = 'black', label = 'Rolling Std') mean = plt.plot(rolmean, color = 'red', label = 'Rolling Mean') plt.legend(loc = 'best') plt.title('Rolling Mean and Standard Deviation') plt.grid() plt.show(block=False) # get n predictions for series by model def future_preds_df(model, series, num_steps): pred_first = series.index.max() + relativedelta(weeks = 1) pred_last = series.index.max() + relativedelta(weeks = num_steps) date_range_index = pd.date_range(pred_first, pred_last, freq = 'W') vals = model.predict(n_periods = num_steps) return pd.DataFrame(vals,index = date_range_index) # Augmented Dicky-Fuller Test def adf_test(timeseries): adf, pvalue, usedlag, nobs, critical_values, icbest = adfuller(timeseries) print("Test statistic: ", adf, 2) print("P-value: ", pvalue) print("Critical values: ", critical_values) # source: notebook 06f_DEMO_SARIMA_Prophet by IBM Specialized Models: Time Series and Survival Analysis course stations = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/station_list.csv', index_col=0) dublin_air_data = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/Stations/532_dublin_airport.csv', index_col=0, parse_dates=['date']) dublin_air_data.rename(columns={'ind': 'i_rain', 'ind.1': 'i_temp', 'ind.2': 'i_wetb', 'ind.3': 'i_wdsp', 'ind.4': 'i_wddir'}, inplace=True) dublin_air_data.set_index('date', inplace=True) dublin_air_data.isnull().sum() dublin_air_data[dublin_air_data.isna().any(axis=1)] dublin_air_data.interpolate(inplace=True) dublin_air_data.isnull().sum().sum()
code
89127402/cell_23
[ "text_plain_output_1.png" ]
from dateutil.relativedelta import relativedelta from statsmodels.tsa.stattools import adfuller import matplotlib.pyplot as plt import pandas as pd from dateutil.relativedelta import relativedelta # rolling averages and std def rolling_stat(timeseries, window_size): # Determing rolling statistics rolmean = timeseries.rolling(window = window_size).mean() rolstd = timeseries.rolling(window = window_size).std() # Plot rolling statistics: fig, ax = plt.subplots(figsize = (12, 4)) orig = plt.plot(timeseries, color = '#4DBEEE', label = 'Original') std = plt.plot(rolstd, color = 'black', label = 'Rolling Std') mean = plt.plot(rolmean, color = 'red', label = 'Rolling Mean') plt.legend(loc = 'best') plt.title('Rolling Mean and Standard Deviation') plt.grid() plt.show(block=False) # get n predictions for series by model def future_preds_df(model, series, num_steps): pred_first = series.index.max() + relativedelta(weeks = 1) pred_last = series.index.max() + relativedelta(weeks = num_steps) date_range_index = pd.date_range(pred_first, pred_last, freq = 'W') vals = model.predict(n_periods = num_steps) return pd.DataFrame(vals,index = date_range_index) # Augmented Dicky-Fuller Test def adf_test(timeseries): adf, pvalue, usedlag, nobs, critical_values, icbest = adfuller(timeseries) print("Test statistic: ", adf, 2) print("P-value: ", pvalue) print("Critical values: ", critical_values) # source: notebook 06f_DEMO_SARIMA_Prophet by IBM Specialized Models: Time Series and Survival Analysis course stations = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/station_list.csv', index_col=0) dublin_air_data = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/Stations/532_dublin_airport.csv', index_col=0, parse_dates=['date']) dublin_air_data.rename(columns={'ind': 'i_rain', 'ind.1': 'i_temp', 'ind.2': 'i_wetb', 'ind.3': 'i_wdsp', 'ind.4': 'i_wddir'}, inplace=True) dublin_air_data.set_index('date', inplace=True) dublin_air_data.isnull().sum()
code
89127402/cell_20
[ "text_plain_output_1.png" ]
from dateutil.relativedelta import relativedelta from statsmodels.tsa.stattools import adfuller import matplotlib.pyplot as plt import pandas as pd from dateutil.relativedelta import relativedelta # rolling averages and std def rolling_stat(timeseries, window_size): # Determing rolling statistics rolmean = timeseries.rolling(window = window_size).mean() rolstd = timeseries.rolling(window = window_size).std() # Plot rolling statistics: fig, ax = plt.subplots(figsize = (12, 4)) orig = plt.plot(timeseries, color = '#4DBEEE', label = 'Original') std = plt.plot(rolstd, color = 'black', label = 'Rolling Std') mean = plt.plot(rolmean, color = 'red', label = 'Rolling Mean') plt.legend(loc = 'best') plt.title('Rolling Mean and Standard Deviation') plt.grid() plt.show(block=False) # get n predictions for series by model def future_preds_df(model, series, num_steps): pred_first = series.index.max() + relativedelta(weeks = 1) pred_last = series.index.max() + relativedelta(weeks = num_steps) date_range_index = pd.date_range(pred_first, pred_last, freq = 'W') vals = model.predict(n_periods = num_steps) return pd.DataFrame(vals,index = date_range_index) # Augmented Dicky-Fuller Test def adf_test(timeseries): adf, pvalue, usedlag, nobs, critical_values, icbest = adfuller(timeseries) print("Test statistic: ", adf, 2) print("P-value: ", pvalue) print("Critical values: ", critical_values) # source: notebook 06f_DEMO_SARIMA_Prophet by IBM Specialized Models: Time Series and Survival Analysis course stations = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/station_list.csv', index_col=0) dublin_air_data = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/Stations/532_dublin_airport.csv', index_col=0, parse_dates=['date']) dublin_air_data.rename(columns={'ind': 'i_rain', 'ind.1': 'i_temp', 'ind.2': 'i_wetb', 'ind.3': 'i_wdsp', 'ind.4': 'i_wddir'}, inplace=True) print('Unique timestamps: ', dublin_air_data.date.nunique()) print('All timestamps: ', dublin_air_data.shape[0])
code
89127402/cell_26
[ "text_plain_output_1.png" ]
from dateutil.relativedelta import relativedelta from statsmodels.tsa.stattools import adfuller import matplotlib.pyplot as plt import pandas as pd from dateutil.relativedelta import relativedelta # rolling averages and std def rolling_stat(timeseries, window_size): # Determing rolling statistics rolmean = timeseries.rolling(window = window_size).mean() rolstd = timeseries.rolling(window = window_size).std() # Plot rolling statistics: fig, ax = plt.subplots(figsize = (12, 4)) orig = plt.plot(timeseries, color = '#4DBEEE', label = 'Original') std = plt.plot(rolstd, color = 'black', label = 'Rolling Std') mean = plt.plot(rolmean, color = 'red', label = 'Rolling Mean') plt.legend(loc = 'best') plt.title('Rolling Mean and Standard Deviation') plt.grid() plt.show(block=False) # get n predictions for series by model def future_preds_df(model, series, num_steps): pred_first = series.index.max() + relativedelta(weeks = 1) pred_last = series.index.max() + relativedelta(weeks = num_steps) date_range_index = pd.date_range(pred_first, pred_last, freq = 'W') vals = model.predict(n_periods = num_steps) return pd.DataFrame(vals,index = date_range_index) # Augmented Dicky-Fuller Test def adf_test(timeseries): adf, pvalue, usedlag, nobs, critical_values, icbest = adfuller(timeseries) print("Test statistic: ", adf, 2) print("P-value: ", pvalue) print("Critical values: ", critical_values) # source: notebook 06f_DEMO_SARIMA_Prophet by IBM Specialized Models: Time Series and Survival Analysis course stations = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/station_list.csv', index_col=0) dublin_air_data = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/Stations/532_dublin_airport.csv', index_col=0, parse_dates=['date']) dublin_air_data.rename(columns={'ind': 'i_rain', 'ind.1': 'i_temp', 'ind.2': 'i_wetb', 'ind.3': 'i_wdsp', 'ind.4': 'i_wddir'}, inplace=True) dublin_air_data.set_index('date', inplace=True) dublin_air_data.isnull().sum() dublin_air_data[dublin_air_data.isna().any(axis=1)] dublin_air_data.interpolate(inplace=True) dublin_air_data.isnull().sum().sum() dublin_air_data.drop(columns=['i_rain', 'i_temp', 'i_wetb', 'i_wdsp', 'i_wddir'], inplace=True) dublin_air_data.nunique()
code
89127402/cell_2
[ "text_plain_output_1.png" ]
!pip install pmdarima
code
89127402/cell_19
[ "text_html_output_1.png" ]
from dateutil.relativedelta import relativedelta from statsmodels.tsa.stattools import adfuller import matplotlib.pyplot as plt import pandas as pd from dateutil.relativedelta import relativedelta # rolling averages and std def rolling_stat(timeseries, window_size): # Determing rolling statistics rolmean = timeseries.rolling(window = window_size).mean() rolstd = timeseries.rolling(window = window_size).std() # Plot rolling statistics: fig, ax = plt.subplots(figsize = (12, 4)) orig = plt.plot(timeseries, color = '#4DBEEE', label = 'Original') std = plt.plot(rolstd, color = 'black', label = 'Rolling Std') mean = plt.plot(rolmean, color = 'red', label = 'Rolling Mean') plt.legend(loc = 'best') plt.title('Rolling Mean and Standard Deviation') plt.grid() plt.show(block=False) # get n predictions for series by model def future_preds_df(model, series, num_steps): pred_first = series.index.max() + relativedelta(weeks = 1) pred_last = series.index.max() + relativedelta(weeks = num_steps) date_range_index = pd.date_range(pred_first, pred_last, freq = 'W') vals = model.predict(n_periods = num_steps) return pd.DataFrame(vals,index = date_range_index) # Augmented Dicky-Fuller Test def adf_test(timeseries): adf, pvalue, usedlag, nobs, critical_values, icbest = adfuller(timeseries) print("Test statistic: ", adf, 2) print("P-value: ", pvalue) print("Critical values: ", critical_values) # source: notebook 06f_DEMO_SARIMA_Prophet by IBM Specialized Models: Time Series and Survival Analysis course stations = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/station_list.csv', index_col=0) dublin_air_data = pd.read_csv('../input/hourly-weather-data-in-ireland-from-24-stations/Stations/532_dublin_airport.csv', index_col=0, parse_dates=['date']) dublin_air_data.rename(columns={'ind': 'i_rain', 'ind.1': 'i_temp', 'ind.2': 'i_wetb', 'ind.3': 'i_wdsp', 'ind.4': 'i_wddir'}, inplace=True) print('Column names: ', dublin_air_data.columns)
code