path: string (lengths 13 to 17)
screenshot_names: sequence (lengths 1 to 873)
code: string (lengths 0 to 40.4k)
cell_type: string (1 distinct value)
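Each record below pairs a notebook cell path (notebook id / cell name) with the names of its rendered output screenshots, the cell's flattened source, and the cell type. As a minimal, hedged sketch of handling rows of this shape, assuming the dump were materialized as a pandas DataFrame with exactly these four columns (the two sample rows are copied from records further down; nothing else here is part of the dataset itself):

import pandas as pd

# Two rows copied from the records below, purely for illustration.
records = pd.DataFrame([
    {"path": "2023611/cell_5", "screenshot_names": ["text_plain_output_1.png"],
     "code": "import h5py\nf = h5py.File('../input/LetterColorImages.h5', 'r')", "cell_type": "code"},
    {"path": "50224775/cell_2", "screenshot_names": ["text_plain_output_1.png"],
     "code": "A = [1, 25, 35, 250, 500, 750, 1000]\nprint(A)", "cell_type": "code"},
])

# The notebook id is the part of `path` before the slash; count cells per notebook.
notebook_id = records["path"].str.split("/").str[0]
print(records.groupby(notebook_id).size())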
2023611/cell_5
[ "text_plain_output_1.png" ]
import h5py f = h5py.File('../input/LetterColorImages.h5', 'r') keys = list(f.keys()) keys
code
50224775/cell_9
[ "text_plain_output_1.png" ]
def binary_search_recursive(array, element, start, end): if start > end: return -1 mid = (start + end) // 2 if element == array[mid]: return mid if element < array[mid]: return binary_search_recursive(array, element, start, mid - 1) else: return binary_search_recursive(array, element, mid + 1, end) element = 50 array = [10, 20, 30, 40, 50, 60, 70] def selectionSort(array, size): for step in range(size): min_idx = step for i in range(step + 1, size): if array[i] < array[min_idx]: min_idx = i array[step], array[min_idx] = (array[min_idx], array[step]) data = [10, 5, 30, 15, 50, 6, 25] size = len(data) selectionSort(data, size) print('Sorted Array in Ascending Order:') print(data)
code
50224775/cell_2
[ "text_plain_output_1.png" ]
A = [1, 25, 35, 250, 500, 750, 1000] print(A)
code
50224775/cell_7
[ "text_plain_output_1.png" ]
def binary_search_recursive(array, element, start, end): if start > end: return -1 mid = (start + end) // 2 if element == array[mid]: return mid if element < array[mid]: return binary_search_recursive(array, element, start, mid - 1) else: return binary_search_recursive(array, element, mid + 1, end) element = 50 array = [10, 20, 30, 40, 50, 60, 70] print('Searching for {}'.format(element)) print('Index of {}: {}'.format(element, binary_search_recursive(array, element, 0, len(array) - 1)))
code
50224775/cell_3
[ "text_plain_output_1.png" ]
A = [1, 25, 35, 250, 500, 750, 1000] print(A.index(35))
code
50224775/cell_10
[ "text_plain_output_1.png" ]
def binary_search_recursive(array, element, start, end): if start > end: return -1 mid = (start + end) // 2 if element == array[mid]: return mid if element < array[mid]: return binary_search_recursive(array, element, start, mid - 1) else: return binary_search_recursive(array, element, mid + 1, end) element = 50 array = [10, 20, 30, 40, 50, 60, 70] def selectionSort(array, size): for step in range(size): min_idx = step for i in range(step + 1, size): if array[i] < array[min_idx]: min_idx = i array[step], array[min_idx] = (array[min_idx], array[step]) data = [10, 5, 30, 15, 50, 6, 25] size = len(data) selectionSort(data, size) def insertionSort(array): for step in range(1, len(array)): key = array[step] j = step - 1 while j >= 0 and key < array[j]: array[j + 1] = array[j] j = j - 1 array[j + 1] = key data = [10, 5, 30, 15, 50, 6, 25] insertionSort(data) print('Sorted Array in Ascending Order:') print(data)
code
50224775/cell_5
[ "text_plain_output_1.png" ]
def search(arr, n, x): for i in range(0, n): if arr[i] == x: return i return -1 arr = [10, 20, 30, 40, 50, 60, 70] x = 50 n = len(arr) result = search(arr, n, x) if result == -1: print('Element is not in the array') else: print('Element found at index', result)
code
1010259/cell_13
[ "text_plain_output_1.png" ]
from scipy import sparse from sklearn import model_selection, preprocessing, ensemble from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer import numpy as np import pandas as pd import xgboost as xgb def runXGB(train_X, train_y, test_X, test_y=None, feature_names=None, seed_val=0, num_rounds=1000): param = {} param['objective'] = 'multi:softprob' param['eta'] = 0.1 param['max_depth'] = 6 param['silent'] = 1 param['num_class'] = 3 param['eval_metric'] = 'mlogloss' param['min_child_weight'] = 1 param['subsample'] = 0.7 param['colsample_bytree'] = 0.7 param['seed'] = 8088 num_rounds = num_rounds plst = list(param.items()) xgtrain = xgb.DMatrix(train_X, label=train_y) if test_y is not None: xgtest = xgb.DMatrix(test_X, label=test_y) watchlist = [(xgtrain, 'train'), (xgtest, 'test')] model = xgb.train(plst, xgtrain, num_rounds, watchlist, early_stopping_rounds=20, verbose_eval=25) else: xgtest = xgb.DMatrix(test_X) model = xgb.train(plst, xgtrain, num_rounds) pred_test_y = model.predict(xgtest) return (pred_test_y, model) data_path = '../input/' train_file = data_path + 'train.json' test_file = data_path + 'test.json' train_df = pd.read_json(train_file) test_df = pd.read_json(test_file) features_to_use = ['bathrooms', 'bedrooms', 'latitude', 'longitude', 'price'] train_df['num_photos'] = train_df['photos'].apply(len) test_df['num_photos'] = test_df['photos'].apply(len) train_df['num_features'] = train_df['features'].apply(len) test_df['num_features'] = test_df['features'].apply(len) train_df['num_description_words'] = train_df['description'].apply(lambda x: len(x.split(' '))) test_df['num_description_words'] = test_df['description'].apply(lambda x: len(x.split(' '))) train_df['created'] = pd.to_datetime(train_df['created']) test_df['created'] = pd.to_datetime(test_df['created']) train_df['created_year'] = train_df['created'].dt.year test_df['created_year'] = test_df['created'].dt.year train_df['created_month'] = train_df['created'].dt.month test_df['created_month'] = test_df['created'].dt.month train_df['created_day'] = train_df['created'].dt.day test_df['created_day'] = test_df['created'].dt.day train_df['created_hour'] = train_df['created'].dt.hour test_df['created_hour'] = test_df['created'].dt.hour train_df['price_per_bed'] = train_df['price'] / train_df['bedrooms'].clip(lower=1) test_df['price_per_bed'] = test_df['price'] / test_df['bedrooms'].clip(lower=1) train_df['created_date'] = np.array(train_df.created.values, dtype='datetime64[D]').astype(np.float32) test_df['created_date'] = np.array(test_df.created.values, dtype='datetime64[D]').astype(np.float32) features_to_use.extend(['num_photos', 'num_features', 'num_description_words', 'created_year', 'created_month', 'created_day', 'listing_id', 'created_hour', 'price_per_bed', 'created_date']) categorical = ['display_address', 'manager_id', 'building_id', 'street_address'] for f in categorical: if train_df[f].dtype == 'object': lbl = preprocessing.LabelEncoder() lbl.fit(list(train_df[f].values) + list(test_df[f].values)) train_df[f] = lbl.transform(list(train_df[f].values)) test_df[f] = lbl.transform(list(test_df[f].values)) features_to_use.append(f) train_df['features'] = train_df['features'].apply(lambda x: ' '.join(['_'.join(i.split(' ')) for i in x])) test_df['features'] = test_df['features'].apply(lambda x: ' '.join(['_'.join(i.split(' ')) for i in x])) tfidf = CountVectorizer(stop_words='english', max_features=200) tr_sparse = tfidf.fit_transform(train_df['features']) te_sparse = 
tfidf.transform(test_df['features']) features_to_use = [i for i in features_to_use if 'manager_id' not in i] train_X = sparse.hstack([train_df[features_to_use], tr_sparse]).tocsr() test_X = sparse.hstack([test_df[features_to_use], te_sparse]).tocsr() features_sparse = ['features_' + str(i) for i in range(tr_sparse.shape[1])] target_num_map = {'high': 0, 'medium': 1, 'low': 2} train_y = np.array(train_df['interest_level'].apply(lambda x: target_num_map[x])) fscores = model.get_fscore() df_features = pd.DataFrame(fscores, index=['score']).T.reset_index() df_features['f_index'] = df_features['index'].apply(lambda x: int(x[1:])) df_features['f_name'] = np.array(features_to_use + features_sparse)[df_features['f_index'].values] pd.Series(index=df_features['f_name'].values, data=df_features['score'].values).sort_values()[-30:].plot(kind='bar')
code
1010259/cell_9
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer import numpy as np import pandas as pd data_path = '../input/' train_file = data_path + 'train.json' test_file = data_path + 'test.json' train_df = pd.read_json(train_file) test_df = pd.read_json(test_file) features_to_use = ['bathrooms', 'bedrooms', 'latitude', 'longitude', 'price'] train_df['num_photos'] = train_df['photos'].apply(len) test_df['num_photos'] = test_df['photos'].apply(len) train_df['num_features'] = train_df['features'].apply(len) test_df['num_features'] = test_df['features'].apply(len) train_df['num_description_words'] = train_df['description'].apply(lambda x: len(x.split(' '))) test_df['num_description_words'] = test_df['description'].apply(lambda x: len(x.split(' '))) train_df['created'] = pd.to_datetime(train_df['created']) test_df['created'] = pd.to_datetime(test_df['created']) train_df['created_year'] = train_df['created'].dt.year test_df['created_year'] = test_df['created'].dt.year train_df['created_month'] = train_df['created'].dt.month test_df['created_month'] = test_df['created'].dt.month train_df['created_day'] = train_df['created'].dt.day test_df['created_day'] = test_df['created'].dt.day train_df['created_hour'] = train_df['created'].dt.hour test_df['created_hour'] = test_df['created'].dt.hour train_df['price_per_bed'] = train_df['price'] / train_df['bedrooms'].clip(lower=1) test_df['price_per_bed'] = test_df['price'] / test_df['bedrooms'].clip(lower=1) train_df['created_date'] = np.array(train_df.created.values, dtype='datetime64[D]').astype(np.float32) test_df['created_date'] = np.array(test_df.created.values, dtype='datetime64[D]').astype(np.float32) features_to_use.extend(['num_photos', 'num_features', 'num_description_words', 'created_year', 'created_month', 'created_day', 'listing_id', 'created_hour', 'price_per_bed', 'created_date']) train_df['features'] = train_df['features'].apply(lambda x: ' '.join(['_'.join(i.split(' ')) for i in x])) test_df['features'] = test_df['features'].apply(lambda x: ' '.join(['_'.join(i.split(' ')) for i in x])) print(train_df['features'].head()) tfidf = CountVectorizer(stop_words='english', max_features=200) tr_sparse = tfidf.fit_transform(train_df['features']) te_sparse = tfidf.transform(test_df['features'])
code
1010259/cell_11
[ "text_plain_output_1.png" ]
from scipy import sparse from sklearn import model_selection, preprocessing, ensemble from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer import numpy as np import pandas as pd data_path = '../input/' train_file = data_path + 'train.json' test_file = data_path + 'test.json' train_df = pd.read_json(train_file) test_df = pd.read_json(test_file) features_to_use = ['bathrooms', 'bedrooms', 'latitude', 'longitude', 'price'] train_df['num_photos'] = train_df['photos'].apply(len) test_df['num_photos'] = test_df['photos'].apply(len) train_df['num_features'] = train_df['features'].apply(len) test_df['num_features'] = test_df['features'].apply(len) train_df['num_description_words'] = train_df['description'].apply(lambda x: len(x.split(' '))) test_df['num_description_words'] = test_df['description'].apply(lambda x: len(x.split(' '))) train_df['created'] = pd.to_datetime(train_df['created']) test_df['created'] = pd.to_datetime(test_df['created']) train_df['created_year'] = train_df['created'].dt.year test_df['created_year'] = test_df['created'].dt.year train_df['created_month'] = train_df['created'].dt.month test_df['created_month'] = test_df['created'].dt.month train_df['created_day'] = train_df['created'].dt.day test_df['created_day'] = test_df['created'].dt.day train_df['created_hour'] = train_df['created'].dt.hour test_df['created_hour'] = test_df['created'].dt.hour train_df['price_per_bed'] = train_df['price'] / train_df['bedrooms'].clip(lower=1) test_df['price_per_bed'] = test_df['price'] / test_df['bedrooms'].clip(lower=1) train_df['created_date'] = np.array(train_df.created.values, dtype='datetime64[D]').astype(np.float32) test_df['created_date'] = np.array(test_df.created.values, dtype='datetime64[D]').astype(np.float32) features_to_use.extend(['num_photos', 'num_features', 'num_description_words', 'created_year', 'created_month', 'created_day', 'listing_id', 'created_hour', 'price_per_bed', 'created_date']) categorical = ['display_address', 'manager_id', 'building_id', 'street_address'] for f in categorical: if train_df[f].dtype == 'object': lbl = preprocessing.LabelEncoder() lbl.fit(list(train_df[f].values) + list(test_df[f].values)) train_df[f] = lbl.transform(list(train_df[f].values)) test_df[f] = lbl.transform(list(test_df[f].values)) features_to_use.append(f) train_df['features'] = train_df['features'].apply(lambda x: ' '.join(['_'.join(i.split(' ')) for i in x])) test_df['features'] = test_df['features'].apply(lambda x: ' '.join(['_'.join(i.split(' ')) for i in x])) tfidf = CountVectorizer(stop_words='english', max_features=200) tr_sparse = tfidf.fit_transform(train_df['features']) te_sparse = tfidf.transform(test_df['features']) features_to_use = [i for i in features_to_use if 'manager_id' not in i] train_X = sparse.hstack([train_df[features_to_use], tr_sparse]).tocsr() test_X = sparse.hstack([test_df[features_to_use], te_sparse]).tocsr() features_sparse = ['features_' + str(i) for i in range(tr_sparse.shape[1])] target_num_map = {'high': 0, 'medium': 1, 'low': 2} train_y = np.array(train_df['interest_level'].apply(lambda x: target_num_map[x])) print(train_X.shape, test_X.shape)
code
1010259/cell_15
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd data_path = '../input/' train_file = data_path + 'train.json' test_file = data_path + 'test.json' train_df = pd.read_json(train_file) test_df = pd.read_json(test_file) features_to_use = ['bathrooms', 'bedrooms', 'latitude', 'longitude', 'price'] train_df['num_photos'] = train_df['photos'].apply(len) test_df['num_photos'] = test_df['photos'].apply(len) train_df['num_features'] = train_df['features'].apply(len) test_df['num_features'] = test_df['features'].apply(len) train_df['num_description_words'] = train_df['description'].apply(lambda x: len(x.split(' '))) test_df['num_description_words'] = test_df['description'].apply(lambda x: len(x.split(' '))) train_df['created'] = pd.to_datetime(train_df['created']) test_df['created'] = pd.to_datetime(test_df['created']) train_df['created_year'] = train_df['created'].dt.year test_df['created_year'] = test_df['created'].dt.year train_df['created_month'] = train_df['created'].dt.month test_df['created_month'] = test_df['created'].dt.month train_df['created_day'] = train_df['created'].dt.day test_df['created_day'] = test_df['created'].dt.day train_df['created_hour'] = train_df['created'].dt.hour test_df['created_hour'] = test_df['created'].dt.hour train_df['price_per_bed'] = train_df['price'] / train_df['bedrooms'].clip(lower=1) test_df['price_per_bed'] = test_df['price'] / test_df['bedrooms'].clip(lower=1) train_df['created_date'] = np.array(train_df.created.values, dtype='datetime64[D]').astype(np.float32) test_df['created_date'] = np.array(test_df.created.values, dtype='datetime64[D]').astype(np.float32) features_to_use.extend(['num_photos', 'num_features', 'num_description_words', 'created_year', 'created_month', 'created_day', 'listing_id', 'created_hour', 'price_per_bed', 'created_date']) train_df.head()
code
1010259/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
"""preds, model = runXGB(train_X, train_y, test_X, num_rounds=400) out_df = pd.DataFrame(preds) out_df.columns = ["high", "medium", "low"] out_df["listing_id"] = test_df.listing_id.values out_df.to_csv("0315.csv", index=False)"""
code
1010259/cell_12
[ "text_plain_output_1.png" ]
from scipy import sparse from sklearn import model_selection, preprocessing, ensemble from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from sklearn.metrics import log_loss import numpy as np import pandas as pd import xgboost as xgb def runXGB(train_X, train_y, test_X, test_y=None, feature_names=None, seed_val=0, num_rounds=1000): param = {} param['objective'] = 'multi:softprob' param['eta'] = 0.1 param['max_depth'] = 6 param['silent'] = 1 param['num_class'] = 3 param['eval_metric'] = 'mlogloss' param['min_child_weight'] = 1 param['subsample'] = 0.7 param['colsample_bytree'] = 0.7 param['seed'] = 8088 num_rounds = num_rounds plst = list(param.items()) xgtrain = xgb.DMatrix(train_X, label=train_y) if test_y is not None: xgtest = xgb.DMatrix(test_X, label=test_y) watchlist = [(xgtrain, 'train'), (xgtest, 'test')] model = xgb.train(plst, xgtrain, num_rounds, watchlist, early_stopping_rounds=20, verbose_eval=25) else: xgtest = xgb.DMatrix(test_X) model = xgb.train(plst, xgtrain, num_rounds) pred_test_y = model.predict(xgtest) return (pred_test_y, model) data_path = '../input/' train_file = data_path + 'train.json' test_file = data_path + 'test.json' train_df = pd.read_json(train_file) test_df = pd.read_json(test_file) features_to_use = ['bathrooms', 'bedrooms', 'latitude', 'longitude', 'price'] train_df['num_photos'] = train_df['photos'].apply(len) test_df['num_photos'] = test_df['photos'].apply(len) train_df['num_features'] = train_df['features'].apply(len) test_df['num_features'] = test_df['features'].apply(len) train_df['num_description_words'] = train_df['description'].apply(lambda x: len(x.split(' '))) test_df['num_description_words'] = test_df['description'].apply(lambda x: len(x.split(' '))) train_df['created'] = pd.to_datetime(train_df['created']) test_df['created'] = pd.to_datetime(test_df['created']) train_df['created_year'] = train_df['created'].dt.year test_df['created_year'] = test_df['created'].dt.year train_df['created_month'] = train_df['created'].dt.month test_df['created_month'] = test_df['created'].dt.month train_df['created_day'] = train_df['created'].dt.day test_df['created_day'] = test_df['created'].dt.day train_df['created_hour'] = train_df['created'].dt.hour test_df['created_hour'] = test_df['created'].dt.hour train_df['price_per_bed'] = train_df['price'] / train_df['bedrooms'].clip(lower=1) test_df['price_per_bed'] = test_df['price'] / test_df['bedrooms'].clip(lower=1) train_df['created_date'] = np.array(train_df.created.values, dtype='datetime64[D]').astype(np.float32) test_df['created_date'] = np.array(test_df.created.values, dtype='datetime64[D]').astype(np.float32) features_to_use.extend(['num_photos', 'num_features', 'num_description_words', 'created_year', 'created_month', 'created_day', 'listing_id', 'created_hour', 'price_per_bed', 'created_date']) categorical = ['display_address', 'manager_id', 'building_id', 'street_address'] for f in categorical: if train_df[f].dtype == 'object': lbl = preprocessing.LabelEncoder() lbl.fit(list(train_df[f].values) + list(test_df[f].values)) train_df[f] = lbl.transform(list(train_df[f].values)) test_df[f] = lbl.transform(list(test_df[f].values)) features_to_use.append(f) train_df['features'] = train_df['features'].apply(lambda x: ' '.join(['_'.join(i.split(' ')) for i in x])) test_df['features'] = test_df['features'].apply(lambda x: ' '.join(['_'.join(i.split(' ')) for i in x])) tfidf = CountVectorizer(stop_words='english', max_features=200) tr_sparse = tfidf.fit_transform(train_df['features']) 
te_sparse = tfidf.transform(test_df['features']) features_to_use = [i for i in features_to_use if 'manager_id' not in i] train_X = sparse.hstack([train_df[features_to_use], tr_sparse]).tocsr() test_X = sparse.hstack([test_df[features_to_use], te_sparse]).tocsr() features_sparse = ['features_' + str(i) for i in range(tr_sparse.shape[1])] target_num_map = {'high': 0, 'medium': 1, 'low': 2} train_y = np.array(train_df['interest_level'].apply(lambda x: target_num_map[x])) cv_scores = [] kf = model_selection.KFold(n_splits=5, shuffle=True, random_state=2016) for dev_index, val_index in kf.split(range(train_X.shape[0])): dev_X, val_X = (train_X[dev_index, :], train_X[val_index, :]) dev_y, val_y = (train_y[dev_index], train_y[val_index]) preds, model = runXGB(dev_X, dev_y, val_X, val_y) cv_scores.append(log_loss(val_y, preds)) print(cv_scores) break
code
1010259/cell_5
[ "text_html_output_1.png" ]
import pandas as pd data_path = '../input/' train_file = data_path + 'train.json' test_file = data_path + 'test.json' train_df = pd.read_json(train_file) test_df = pd.read_json(test_file) print(train_df.shape) print(test_df.shape)
code
2011002/cell_19
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd import tensorflow as tf dataset = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') def one_hot(num): output = np.zeros([1, 10]) output[0, num] = 1 return output.astype(int) labels_encoded = np.array([np.array([int(i == l) for i in range(10)]) for l in dataset.iloc[:, 0].values]) dataset = dataset.drop('label', axis=1) (dataset.shape, labels_encoded.shape) train_size = 40000 validation_size = 2000 train = dataset.iloc[:train_size] train_targets = labels_encoded[:train_size] validation = dataset.iloc[train_size:] validation_targets = labels_encoded[train_size:] (train.shape, train_targets.shape, validation.shape, validation_targets.shape, test.shape) input_size = 784 output_size = 10 hidden_layer_size = 50 tf.reset_default_graph() inputs = tf.placeholder(tf.float32, [None, input_size]) targets = tf.placeholder(tf.float32, [None, output_size]) w_1 = tf.get_variable('w_1', [input_size, hidden_layer_size]) b_1 = tf.get_variable('b_1', [hidden_layer_size]) o_1 = tf.nn.relu(tf.matmul(inputs, w_1) + b_1) w_2 = tf.get_variable('w_2', [hidden_layer_size, hidden_layer_size]) b_2 = tf.get_variable('b_2', [hidden_layer_size]) o_2 = tf.nn.relu(tf.matmul(o_1, w_2) + b_2) w_3 = tf.get_variable('w_3', [hidden_layer_size, output_size]) b_3 = tf.get_variable('b_3', [output_size]) outputs = tf.matmul(o_2, w_3) + b_3 loss = tf.nn.softmax_cross_entropy_with_logits(logits=outputs, labels=targets) mean_loss = tf.reduce_mean(loss) optimize = tf.train.AdamOptimizer(learning_rate=0.001).minimize(mean_loss) out_equal_target = tf.equal(tf.argmax(outputs, 1), tf.argmax(targets, 1)) accuracy = tf.reduce_mean(tf.cast(out_equal_target, tf.float32)) sess = tf.InteractiveSession() initializer = tf.global_variables_initializer() sess.run(initializer) batch_size = 100 batch_number = train.shape[0] // batch_size max_epoch = 15 prev_validation_loss = 9999999.0 for epoch_counter in range(max_epoch): curr_epoch_loss = 0 start = 0 end = start + batch_size for batch_counter in range(batch_number): input_batch = np.multiply(train.iloc[start:end].values.astype(np.float32), 1.0 / 255.0) target_batch = train_targets[start:end] start = end end = start + batch_size _, batch_loss = sess.run([optimize, mean_loss], feed_dict={inputs: input_batch, targets: target_batch}) curr_epoch_loss += batch_loss curr_epoch_loss /= batch_number input_batch = np.multiply(validation.values.astype(np.float32), 1.0 / 255.0) target_batch = validation_targets val_loss, val_accuracy = sess.run([mean_loss, accuracy], feed_dict={inputs: input_batch, targets: target_batch}) print('Epoch ' + str(epoch_counter + 1) + '. Training loss: ' + '{0:.3f}'.format(curr_epoch_loss) + '. Validation loss: ' + '{0:.3f}'.format(val_loss) + '. Validation accuracy: ' + '{0:.2f}'.format(val_accuracy * 100.0) + '%') if val_loss > prev_validation_loss: break prev_validation_loss = val_loss print('End of Training')
code
2011002/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder import tensorflow as tf
code
2011002/cell_7
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd dataset = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') def one_hot(num): output = np.zeros([1, 10]) output[0, num] = 1 return output.astype(int) labels_encoded = np.array([np.array([int(i == l) for i in range(10)]) for l in dataset.iloc[:, 0].values]) dataset = dataset.drop('label', axis=1) (dataset.shape, labels_encoded.shape) train_size = 40000 validation_size = 2000 train = dataset.iloc[:train_size] train_targets = labels_encoded[:train_size] validation = dataset.iloc[train_size:] validation_targets = labels_encoded[train_size:] (train.shape, train_targets.shape, validation.shape, validation_targets.shape, test.shape)
code
2011002/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd dataset = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') dataset.head()
code
2011002/cell_5
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd dataset = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') def one_hot(num): output = np.zeros([1, 10]) output[0, num] = 1 return output.astype(int) labels_encoded = np.array([np.array([int(i == l) for i in range(10)]) for l in dataset.iloc[:, 0].values]) dataset = dataset.drop('label', axis=1) (dataset.shape, labels_encoded.shape)
code
130015070/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
from nltk.corpus import stopwords from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.preprocessing.sequence import pad_sequences import nltk import pandas as pd import tensorflow as tf import pandas as pd import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.callbacks import EarlyStopping from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix import nltk nltk.download('stopwords') nltk.download('punkt') from nltk.corpus import stopwords from nltk.tokenize import word_tokenize import functools, re import random df = pd.read_csv('/kaggle/input/cyberbullying-classification/cyberbullying_tweets.csv') df df.cyberbullying_type.value_counts().plot.barh(xlim=(7800, 8000)) stopwords = [i.lower() for i in nltk.corpus.stopwords.words('english') + [chr(i) for i in range(97, 123)]] x = df.tweet_text.apply(lambda text: re.sub('\\s+', ' ', ' '.join([i for i in re.sub('[^9A-Za-z ]', '', re.sub('\\n', '', re.sub('\\s+', ' ', re.sub('http\\S+', '', text.lower())))).split(' ') if i not in stopwords]))).values.astype(str) x tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=2000, oov_token='<OOV>') tokenizer.fit_on_texts(x) word_index = tokenizer.word_index x_train = pad_sequences(tokenizer.texts_to_sequences(x_train), maxlen=100, padding='post', truncating='post') x_test = pad_sequences(tokenizer.texts_to_sequences(x_test), maxlen=100, padding='post', truncating='post') x_val = pad_sequences(tokenizer.texts_to_sequences(x_val), maxlen=100, padding='post', truncating='post') model = tf.keras.Sequential([tf.keras.layers.Embedding(2000, 64), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, dropout=0.2, recurrent_dropout=0.2)), tf.keras.layers.Dropout(rate=0.2), tf.keras.layers.Dense(64, activation='swish'), tf.keras.layers.Dense(1, activation='sigmoid')]) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy', 'AUC']) model.summary() early_stopping_monitor = EarlyStopping(patience=2) history = model.fit(x_train, y_train, epochs=10, validation_data=(x_val, y_val), callbacks=[early_stopping_monitor])
code
130015070/cell_9
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords import nltk import pandas as pd import pandas as pd import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.callbacks import EarlyStopping from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix import nltk nltk.download('stopwords') nltk.download('punkt') from nltk.corpus import stopwords from nltk.tokenize import word_tokenize import functools, re import random df = pd.read_csv('/kaggle/input/cyberbullying-classification/cyberbullying_tweets.csv') df df.cyberbullying_type.value_counts().plot.barh(xlim=(7800, 8000)) stopwords = [i.lower() for i in nltk.corpus.stopwords.words('english') + [chr(i) for i in range(97, 123)]] x = df.tweet_text.apply(lambda text: re.sub('\\s+', ' ', ' '.join([i for i in re.sub('[^9A-Za-z ]', '', re.sub('\\n', '', re.sub('\\s+', ' ', re.sub('http\\S+', '', text.lower())))).split(' ') if i not in stopwords]))).values.astype(str) x
code
130015070/cell_25
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from sklearn.metrics import classification_report, confusion_matrix from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.preprocessing.sequence import pad_sequences import nltk import pandas as pd import tensorflow as tf import pandas as pd import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.callbacks import EarlyStopping from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix import nltk nltk.download('stopwords') nltk.download('punkt') from nltk.corpus import stopwords from nltk.tokenize import word_tokenize import functools, re import random df = pd.read_csv('/kaggle/input/cyberbullying-classification/cyberbullying_tweets.csv') df df.cyberbullying_type.value_counts().plot.barh(xlim=(7800, 8000)) stopwords = [i.lower() for i in nltk.corpus.stopwords.words('english') + [chr(i) for i in range(97, 123)]] x = df.tweet_text.apply(lambda text: re.sub('\\s+', ' ', ' '.join([i for i in re.sub('[^9A-Za-z ]', '', re.sub('\\n', '', re.sub('\\s+', ' ', re.sub('http\\S+', '', text.lower())))).split(' ') if i not in stopwords]))).values.astype(str) x tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=2000, oov_token='<OOV>') tokenizer.fit_on_texts(x) word_index = tokenizer.word_index x_train = pad_sequences(tokenizer.texts_to_sequences(x_train), maxlen=100, padding='post', truncating='post') x_test = pad_sequences(tokenizer.texts_to_sequences(x_test), maxlen=100, padding='post', truncating='post') x_val = pad_sequences(tokenizer.texts_to_sequences(x_val), maxlen=100, padding='post', truncating='post') model = tf.keras.Sequential([tf.keras.layers.Embedding(2000, 64), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, dropout=0.2, recurrent_dropout=0.2)), tf.keras.layers.Dropout(rate=0.2), tf.keras.layers.Dense(64, activation='swish'), tf.keras.layers.Dense(1, activation='sigmoid')]) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy', 'AUC']) model.summary() early_stopping_monitor = EarlyStopping(patience=2) history = model.fit(x_train, y_train, epochs=10, validation_data=(x_val, y_val), callbacks=[early_stopping_monitor]) y_pred = model.predict(x_test).round().T[0] y_pred print(classification_report(y_test.astype(int), y_pred, digits=3))
code
130015070/cell_23
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.preprocessing.sequence import pad_sequences import nltk import pandas as pd import tensorflow as tf import pandas as pd import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.callbacks import EarlyStopping from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix import nltk nltk.download('stopwords') nltk.download('punkt') from nltk.corpus import stopwords from nltk.tokenize import word_tokenize import functools, re import random df = pd.read_csv('/kaggle/input/cyberbullying-classification/cyberbullying_tweets.csv') df df.cyberbullying_type.value_counts().plot.barh(xlim=(7800, 8000)) stopwords = [i.lower() for i in nltk.corpus.stopwords.words('english') + [chr(i) for i in range(97, 123)]] x = df.tweet_text.apply(lambda text: re.sub('\\s+', ' ', ' '.join([i for i in re.sub('[^9A-Za-z ]', '', re.sub('\\n', '', re.sub('\\s+', ' ', re.sub('http\\S+', '', text.lower())))).split(' ') if i not in stopwords]))).values.astype(str) x tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=2000, oov_token='<OOV>') tokenizer.fit_on_texts(x) word_index = tokenizer.word_index x_train = pad_sequences(tokenizer.texts_to_sequences(x_train), maxlen=100, padding='post', truncating='post') x_test = pad_sequences(tokenizer.texts_to_sequences(x_test), maxlen=100, padding='post', truncating='post') x_val = pad_sequences(tokenizer.texts_to_sequences(x_val), maxlen=100, padding='post', truncating='post') model = tf.keras.Sequential([tf.keras.layers.Embedding(2000, 64), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, dropout=0.2, recurrent_dropout=0.2)), tf.keras.layers.Dropout(rate=0.2), tf.keras.layers.Dense(64, activation='swish'), tf.keras.layers.Dense(1, activation='sigmoid')]) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy', 'AUC']) model.summary() early_stopping_monitor = EarlyStopping(patience=2) history = model.fit(x_train, y_train, epochs=10, validation_data=(x_val, y_val), callbacks=[early_stopping_monitor]) y_pred = model.predict(x_test).round().T[0] y_pred
code
130015070/cell_26
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from sklearn.metrics import classification_report, confusion_matrix from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.preprocessing.sequence import pad_sequences import nltk import numpy as np import pandas as pd import tensorflow as tf import pandas as pd import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.callbacks import EarlyStopping from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix import nltk nltk.download('stopwords') nltk.download('punkt') from nltk.corpus import stopwords from nltk.tokenize import word_tokenize import functools, re import random df = pd.read_csv('/kaggle/input/cyberbullying-classification/cyberbullying_tweets.csv') df df.cyberbullying_type.value_counts().plot.barh(xlim=(7800, 8000)) stopwords = [i.lower() for i in nltk.corpus.stopwords.words('english') + [chr(i) for i in range(97, 123)]] x = df.tweet_text.apply(lambda text: re.sub('\\s+', ' ', ' '.join([i for i in re.sub('[^9A-Za-z ]', '', re.sub('\\n', '', re.sub('\\s+', ' ', re.sub('http\\S+', '', text.lower())))).split(' ') if i not in stopwords]))).values.astype(str) x tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=2000, oov_token='<OOV>') tokenizer.fit_on_texts(x) word_index = tokenizer.word_index x_train = pad_sequences(tokenizer.texts_to_sequences(x_train), maxlen=100, padding='post', truncating='post') x_test = pad_sequences(tokenizer.texts_to_sequences(x_test), maxlen=100, padding='post', truncating='post') x_val = pad_sequences(tokenizer.texts_to_sequences(x_val), maxlen=100, padding='post', truncating='post') model = tf.keras.Sequential([tf.keras.layers.Embedding(2000, 64), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, dropout=0.2, recurrent_dropout=0.2)), tf.keras.layers.Dropout(rate=0.2), tf.keras.layers.Dense(64, activation='swish'), tf.keras.layers.Dense(1, activation='sigmoid')]) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy', 'AUC']) model.summary() early_stopping_monitor = EarlyStopping(patience=2) history = model.fit(x_train, y_train, epochs=10, validation_data=(x_val, y_val), callbacks=[early_stopping_monitor]) y_pred = model.predict(x_test).round().T[0] y_pred y_rand = np.random.random(*y_test.shape).round().astype(int) (y_rand == y_test).mean()
code
130015070/cell_11
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords import nltk import pandas as pd import pandas as pd import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.callbacks import EarlyStopping from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix import nltk nltk.download('stopwords') nltk.download('punkt') from nltk.corpus import stopwords from nltk.tokenize import word_tokenize import functools, re import random df = pd.read_csv('/kaggle/input/cyberbullying-classification/cyberbullying_tweets.csv') df df.cyberbullying_type.value_counts().plot.barh(xlim=(7800, 8000)) stopwords = [i.lower() for i in nltk.corpus.stopwords.words('english') + [chr(i) for i in range(97, 123)]] x = df.tweet_text.apply(lambda text: re.sub('\\s+', ' ', ' '.join([i for i in re.sub('[^9A-Za-z ]', '', re.sub('\\n', '', re.sub('\\s+', ' ', re.sub('http\\S+', '', text.lower())))).split(' ') if i not in stopwords]))).values.astype(str) x y = df.cyberbullying_type != 'not_cyberbullying' y
code
130015070/cell_19
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords from tensorflow.keras.preprocessing.sequence import pad_sequences import nltk import pandas as pd import tensorflow as tf import pandas as pd import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.callbacks import EarlyStopping from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix import nltk nltk.download('stopwords') nltk.download('punkt') from nltk.corpus import stopwords from nltk.tokenize import word_tokenize import functools, re import random df = pd.read_csv('/kaggle/input/cyberbullying-classification/cyberbullying_tweets.csv') df df.cyberbullying_type.value_counts().plot.barh(xlim=(7800, 8000)) stopwords = [i.lower() for i in nltk.corpus.stopwords.words('english') + [chr(i) for i in range(97, 123)]] x = df.tweet_text.apply(lambda text: re.sub('\\s+', ' ', ' '.join([i for i in re.sub('[^9A-Za-z ]', '', re.sub('\\n', '', re.sub('\\s+', ' ', re.sub('http\\S+', '', text.lower())))).split(' ') if i not in stopwords]))).values.astype(str) x tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=2000, oov_token='<OOV>') tokenizer.fit_on_texts(x) word_index = tokenizer.word_index x_train = pad_sequences(tokenizer.texts_to_sequences(x_train), maxlen=100, padding='post', truncating='post') x_test = pad_sequences(tokenizer.texts_to_sequences(x_test), maxlen=100, padding='post', truncating='post') x_val = pad_sequences(tokenizer.texts_to_sequences(x_val), maxlen=100, padding='post', truncating='post') model = tf.keras.Sequential([tf.keras.layers.Embedding(2000, 64), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, dropout=0.2, recurrent_dropout=0.2)), tf.keras.layers.Dropout(rate=0.2), tf.keras.layers.Dense(64, activation='swish'), tf.keras.layers.Dense(1, activation='sigmoid')]) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy', 'AUC']) model.summary()
code
130015070/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/cyberbullying-classification/cyberbullying_tweets.csv') df df.cyberbullying_type.value_counts().plot.barh(xlim=(7800, 8000))
code
130015070/cell_3
[ "text_plain_output_1.png" ]
import nltk import pandas as pd import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.callbacks import EarlyStopping from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix import nltk nltk.download('stopwords') nltk.download('punkt') from nltk.corpus import stopwords from nltk.tokenize import word_tokenize import functools, re import random
code
130015070/cell_24
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.preprocessing.sequence import pad_sequences import nltk import pandas as pd import tensorflow as tf import pandas as pd import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.callbacks import EarlyStopping from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix import nltk nltk.download('stopwords') nltk.download('punkt') from nltk.corpus import stopwords from nltk.tokenize import word_tokenize import functools, re import random df = pd.read_csv('/kaggle/input/cyberbullying-classification/cyberbullying_tweets.csv') df df.cyberbullying_type.value_counts().plot.barh(xlim=(7800, 8000)) stopwords = [i.lower() for i in nltk.corpus.stopwords.words('english') + [chr(i) for i in range(97, 123)]] x = df.tweet_text.apply(lambda text: re.sub('\\s+', ' ', ' '.join([i for i in re.sub('[^9A-Za-z ]', '', re.sub('\\n', '', re.sub('\\s+', ' ', re.sub('http\\S+', '', text.lower())))).split(' ') if i not in stopwords]))).values.astype(str) x tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=2000, oov_token='<OOV>') tokenizer.fit_on_texts(x) word_index = tokenizer.word_index x_train = pad_sequences(tokenizer.texts_to_sequences(x_train), maxlen=100, padding='post', truncating='post') x_test = pad_sequences(tokenizer.texts_to_sequences(x_test), maxlen=100, padding='post', truncating='post') x_val = pad_sequences(tokenizer.texts_to_sequences(x_val), maxlen=100, padding='post', truncating='post') model = tf.keras.Sequential([tf.keras.layers.Embedding(2000, 64), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, dropout=0.2, recurrent_dropout=0.2)), tf.keras.layers.Dropout(rate=0.2), tf.keras.layers.Dense(64, activation='swish'), tf.keras.layers.Dense(1, activation='sigmoid')]) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy', 'AUC']) model.summary() early_stopping_monitor = EarlyStopping(patience=2) history = model.fit(x_train, y_train, epochs=10, validation_data=(x_val, y_val), callbacks=[early_stopping_monitor]) y_pred = model.predict(x_test).round().T[0] y_pred (y_pred == y_test).mean()
code
130015070/cell_27
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from sklearn.metrics import classification_report, confusion_matrix from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.preprocessing.sequence import pad_sequences import nltk import numpy as np import pandas as pd import tensorflow as tf import pandas as pd import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.callbacks import EarlyStopping from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix import nltk nltk.download('stopwords') nltk.download('punkt') from nltk.corpus import stopwords from nltk.tokenize import word_tokenize import functools, re import random df = pd.read_csv('/kaggle/input/cyberbullying-classification/cyberbullying_tweets.csv') df df.cyberbullying_type.value_counts().plot.barh(xlim=(7800, 8000)) stopwords = [i.lower() for i in nltk.corpus.stopwords.words('english') + [chr(i) for i in range(97, 123)]] x = df.tweet_text.apply(lambda text: re.sub('\\s+', ' ', ' '.join([i for i in re.sub('[^9A-Za-z ]', '', re.sub('\\n', '', re.sub('\\s+', ' ', re.sub('http\\S+', '', text.lower())))).split(' ') if i not in stopwords]))).values.astype(str) x tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=2000, oov_token='<OOV>') tokenizer.fit_on_texts(x) word_index = tokenizer.word_index x_train = pad_sequences(tokenizer.texts_to_sequences(x_train), maxlen=100, padding='post', truncating='post') x_test = pad_sequences(tokenizer.texts_to_sequences(x_test), maxlen=100, padding='post', truncating='post') x_val = pad_sequences(tokenizer.texts_to_sequences(x_val), maxlen=100, padding='post', truncating='post') model = tf.keras.Sequential([tf.keras.layers.Embedding(2000, 64), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, dropout=0.2, recurrent_dropout=0.2)), tf.keras.layers.Dropout(rate=0.2), tf.keras.layers.Dense(64, activation='swish'), tf.keras.layers.Dense(1, activation='sigmoid')]) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy', 'AUC']) model.summary() early_stopping_monitor = EarlyStopping(patience=2) history = model.fit(x_train, y_train, epochs=10, validation_data=(x_val, y_val), callbacks=[early_stopping_monitor]) y_pred = model.predict(x_test).round().T[0] y_pred y_rand = np.random.random(*y_test.shape).round().astype(int) (y_rand == y_test).mean() print(classification_report(y_test.astype(int), y_rand, digits=3))
code
130015070/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/cyberbullying-classification/cyberbullying_tweets.csv') df
code
88102154/cell_9
[ "text_plain_output_1.png" ]
from tensorflow import keras from tensorflow.keras.utils import plot_model def build_model(): inputs = keras.layers.Input(shape=(28, 28, 1)) conv1 = keras.layers.Conv2D(32, (3, 3), strides=1, activation='relu')(inputs) conv2 = keras.layers.Conv2D(64, (3, 3), strides=1, activation='relu')(conv1) maxpool1 = keras.layers.MaxPool2D((2, 2))(conv2) conv3 = keras.layers.Conv2D(64, (3, 3), strides=1, activation='relu')(maxpool1) conv4 = keras.layers.Conv2D(32, (3, 3), strides=1, activation='relu')(conv3) maxpool2 = keras.layers.MaxPool2D((2, 2))(conv4) flat = keras.layers.Flatten()(maxpool2) x1 = keras.layers.Dense(1024, activation='relu')(flat) x1 = keras.layers.Dropout(0.1)(x1) x = keras.layers.Concatenate(axis=1)([flat, x1]) x2 = keras.layers.Dense(512, activation='relu')(x) x2 = keras.layers.Dropout(0.1)(x2) x = keras.layers.Concatenate(axis=1)([flat, x1, x2]) x3 = keras.layers.Dense(256, activation='relu')(x) x3 = keras.layers.Dropout(0.1)(x3) x = keras.layers.Concatenate(axis=1)([flat, x1, x2, x3]) x4 = keras.layers.Dense(128, activation='relu')(x) x4 = keras.layers.Dropout(0.1)(x4) x = keras.layers.Concatenate(axis=1)([flat, x1, x2, x3, x4]) x = keras.layers.Dense(64, activation='relu')(x) x = keras.layers.Dense(10, activation='softmax')(x) model = keras.models.Model(inputs=inputs, outputs=x) model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss=keras.losses.SparseCategoricalCrossentropy(), metrics=['accuracy']) return model build_model().summary() from tensorflow.keras.utils import plot_model plot_model(build_model(), show_shapes=True)
code
88102154/cell_4
[ "text_plain_output_5.png", "text_plain_output_9.png", "text_plain_output_4.png", "image_output_5.png", "text_plain_output_10.png", "text_plain_output_6.png", "image_output_7.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_7.png", "image_output_8.png", "text_plain_output_8.png", "image_output_6.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "text_plain_output_11.png", "image_output_9.png" ]
from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array import matplotlib.pyplot as plt import pandas as pd train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv') sample = pd.read_csv('../input/digit-recognizer/sample_submission.csv') y = train.pop('label') plt.rcParams['figure.figsize'] = (12, 18) _ = plt.imshow(array_to_img(train.values.reshape(-1, 28, 28, 1)[10]), cmap='gray') plt.axis('off') print(y[10])
code
88102154/cell_10
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.model_selection import KFold, train_test_split, StratifiedKFold from tensorflow import keras from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array from tqdm.keras import TqdmCallback import numpy as np import pandas as pd train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv') sample = pd.read_csv('../input/digit-recognizer/sample_submission.csv') y = train.pop('label') plt.rcParams['figure.figsize'] = (12, 18) _ = plt.imshow(array_to_img(train.values.reshape(-1, 28, 28, 1)[10]), cmap = 'gray') plt.axis('off') print(y[10]) kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=4) def build_model(): inputs = keras.layers.Input(shape=(28, 28, 1)) conv1 = keras.layers.Conv2D(32, (3, 3), strides=1, activation='relu')(inputs) conv2 = keras.layers.Conv2D(64, (3, 3), strides=1, activation='relu')(conv1) maxpool1 = keras.layers.MaxPool2D((2, 2))(conv2) conv3 = keras.layers.Conv2D(64, (3, 3), strides=1, activation='relu')(maxpool1) conv4 = keras.layers.Conv2D(32, (3, 3), strides=1, activation='relu')(conv3) maxpool2 = keras.layers.MaxPool2D((2, 2))(conv4) flat = keras.layers.Flatten()(maxpool2) x1 = keras.layers.Dense(1024, activation='relu')(flat) x1 = keras.layers.Dropout(0.1)(x1) x = keras.layers.Concatenate(axis=1)([flat, x1]) x2 = keras.layers.Dense(512, activation='relu')(x) x2 = keras.layers.Dropout(0.1)(x2) x = keras.layers.Concatenate(axis=1)([flat, x1, x2]) x3 = keras.layers.Dense(256, activation='relu')(x) x3 = keras.layers.Dropout(0.1)(x3) x = keras.layers.Concatenate(axis=1)([flat, x1, x2, x3]) x4 = keras.layers.Dense(128, activation='relu')(x) x4 = keras.layers.Dropout(0.1)(x4) x = keras.layers.Concatenate(axis=1)([flat, x1, x2, x3, x4]) x = keras.layers.Dense(64, activation='relu')(x) x = keras.layers.Dense(10, activation='softmax')(x) model = keras.models.Model(inputs=inputs, outputs=x) model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss=keras.losses.SparseCategoricalCrossentropy(), metrics=['accuracy']) return model def build_model2(): inputs = keras.layers.Input(shape=(28, 28, 1)) conv = keras.layers.Conv2D(32, (3, 3), strides=1, activation='relu')(inputs) conv = keras.layers.Conv2D(64, (3, 3), strides=1, activation='relu')(conv) maxpool = keras.layers.MaxPool2D((2, 2))(conv) conv = keras.layers.Conv2D(64, (3, 3), strides=1, activation='relu')(maxpool) conv = keras.layers.Conv2D(32, (3, 3), strides=1, activation='relu')(conv) maxpool = keras.layers.MaxPool2D((2, 2))(conv) flat = keras.layers.Flatten()(maxpool) x1 = keras.layers.Dense(1024, activation='relu')(flat) x1 = keras.layers.Dropout(0.1)(x1) x2 = keras.layers.Dense(512, activation='relu')(x1) x2 = keras.layers.Dropout(0.1)(x2) x3 = keras.layers.Dense(256, activation='relu')(x2) x3 = keras.layers.Dropout(0.1)(x3) x4 = keras.layers.Dense(128, activation='relu')(x3) x4 = keras.layers.Dropout(0.1)(x4) x1_ = keras.layers.Dense(1024, activation='relu')(flat) x1_ = keras.layers.Dropout(0.1)(x1_) x2_ = keras.layers.Dense(512, activation='relu')(x1_) x2_ = keras.layers.Dropout(0.1)(x2_) x3_ = keras.layers.Dense(256, activation='relu')(x2_) x3_ = keras.layers.Dropout(0.1)(x3_) x4_ = keras.layers.Dense(128, activation='relu')(x3_) x4_ = keras.layers.Dropout(0.1)(x4_) x = keras.layers.Concatenate(axis=1)([flat, x4, x4_]) x = keras.layers.Dense(64, activation='relu')(x) x = keras.layers.Dense(10, activation='softmax')(x) model = keras.models.Model(inputs=inputs, outputs=x) 
model.compile(optimizer='adam', loss=keras.losses.SparseCategoricalCrossentropy(), metrics=['accuracy']) return model def build_model3(): inputs = keras.layers.Input(shape=(28, 28, 1)) conv = keras.layers.Conv2D(32, (3, 3), strides=1, activation='relu')(inputs) conv = keras.layers.Conv2D(64, (3, 3), strides=1, activation='relu')(conv) maxpool = keras.layers.MaxPool2D((2, 2))(conv) conv = keras.layers.Conv2D(64, (3, 3), strides=1, activation='relu')(maxpool) conv = keras.layers.Conv2D(32, (3, 3), strides=1, activation='relu')(conv) maxpool = keras.layers.MaxPool2D((2, 2))(conv) flat = keras.layers.Flatten()(maxpool) x1 = keras.layers.Dense(1024, activation='relu')(flat) x1 = keras.layers.Dropout(0.1)(x1) x2 = keras.layers.Dense(512, activation='relu')(x1) x2 = keras.layers.Dropout(0.1)(x2) x3 = keras.layers.Dense(256, activation='relu')(x2) x3 = keras.layers.Dropout(0.1)(x3) x4 = keras.layers.Dense(128, activation='relu')(x3) x4 = keras.layers.Dropout(0.1)(x4) x1_ = keras.layers.Dense(1024, activation='relu')(flat) x1_ = keras.layers.Dropout(0.1)(x1_) x2_ = keras.layers.Dense(512, activation='relu')(x1_) x2_ = keras.layers.Dropout(0.1)(x2_) x3_ = keras.layers.Dense(256, activation='relu')(x2_) x3_ = keras.layers.Dropout(0.1)(x3_) x4_ = keras.layers.Dense(128, activation='relu')(x3_) x4_ = keras.layers.Dropout(0.1)(x4_) x = keras.layers.Concatenate(axis=1)([x1, x2, x3, x4, x1_, x2_, x3_, x4_, flat]) x = keras.layers.Dense(64, activation='relu')(x) x = keras.layers.Dense(10, activation='softmax')(x) model = keras.models.Model(inputs=inputs, outputs=x) model.compile(optimizer='adam', loss=keras.losses.SparseCategoricalCrossentropy(), metrics=['accuracy']) return model scores = [] test_preds = [] for i, (t, v) in enumerate(kf.split(train, y)): K.clear_session() xtrain = train.iloc[t, :].values.reshape(-1, 28, 28, 1) xval = train.iloc[v, :].values.reshape(-1, 28, 28, 1) xtest = test.copy().values.reshape(-1, 28, 28, 1) ytrain = y[t] yval = y[v] train_gen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=20, width_shift_range=0.3, height_shift_range=0.3) val_gen = ImageDataGenerator(rescale=1.0 / 255) train_datagen = train_gen.flow(xtrain, ytrain, batch_size=64, seed=4) val_datagen = val_gen.flow(xval, yval, batch_size=64, seed=4) test_datagen = val_gen.flow(xtest, batch_size=64, shuffle=False) callbacks = [keras.callbacks.EarlyStopping(patience=28, mode='min', monitor='val_loss', restore_best_weights=True), keras.callbacks.ReduceLROnPlateau(patience=5, mode='min', monitor='val_loss', factor=0.5, min_lr=1e-05), TqdmCallback(verbose=1)] model = build_model() history = model.fit(train_datagen, validation_data=val_datagen, epochs=150, callbacks=callbacks, verbose=0) history = pd.DataFrame(history.history) _ = history.loc[4:, ['val_loss', 'loss']].plot() plt.show() loss, accu = model.evaluate(val_datagen) ypred = model.predict(test_datagen) test_preds.append(ypred) scores.append(accu) print(f'{i} : {accu}') del xtrain del xval del xtest del ytrain del yval del train_gen del val_gen del train_datagen del val_datagen del test_datagen del model del history print(np.mean(scores), np.std(scores))
code
90142889/cell_13
[ "text_plain_output_1.png" ]
import itertools import keras import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/demand-forecast-abal2/demand_forecast.csv', parse_dates=['date']) df df.groupby(['store', 'item']).size() input_df = df.set_index(['date', 'store', 'item']) def train_test_split(dataframe, train_size=0.9): index_df = input_df.index.levels[0] train_cnt = int(len(index_df) * train_size) pivot = index_df[train_cnt] train = dataframe.iloc[dataframe.index.get_level_values('date') <= pivot] test = dataframe.iloc[dataframe.index.get_level_values('date') > pivot] return (train, test) train_df.index.levels[1:][0] train_df.iloc[(train_df.index.get_level_values('store') == 1) & (train_df.index.get_level_values('item') == 1)] def create_dataset(input_df, time_steps=1): Xs, ys = ([], []) stores = train_df.index.levels[1] items = train_df.index.levels[2] store_items = list(itertools.product(stores, items)) for store, item in store_items: curr_df = input_df.iloc[(input_df.index.get_level_values('store') == store) & (input_df.index.get_level_values('item') == item)] for i in range(len(curr_df) - time_steps): v = curr_df.iloc[i:i + time_steps].values Xs.append(v) ys.append(curr_df.iloc[i + time_steps].values) return (np.array(Xs), np.array(ys)) time_steps = 10 X_train, y_train = create_dataset(train_df, time_steps) X_test, y_test = create_dataset(test_df, time_steps) model = keras.Sequential() model.add(keras.layers.Bidirectional(keras.layers.LSTM(units=128, input_shape=(X_train.shape[1], X_train.shape[2])))) model.add(keras.layers.Dropout(rate=0.2)) model.add(keras.layers.Dense(units=1)) model.compile(loss='mean_squared_error', optimizer='adam')
code
90142889/cell_9
[ "image_output_1.png" ]
train_df.index.levels[1:][0]
code
90142889/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
from statsmodels.graphics.tsaplots import plot_acf import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/demand-forecast-abal2/demand_forecast.csv', parse_dates=['date']) df def plot_autocorrelation(store_num, item_num, lag=50): filtered_df = df[(df['store'] == store_num) & (df['item'] == item_num)] for store_num in range(1, 3): for item_num in range(12, 17): plot_autocorrelation(store_num, item_num, 21)
code
90142889/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/demand-forecast-abal2/demand_forecast.csv', parse_dates=['date'])
df
code
90142889/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from statsmodels.graphics.tsaplots import plot_acf
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
import itertools
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90142889/cell_15
[ "text_plain_output_1.png" ]
import itertools
import keras
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf

df = pd.read_csv('/kaggle/input/demand-forecast-abal2/demand_forecast.csv', parse_dates=['date'])
df
df.groupby(['store', 'item']).size()
input_df = df.set_index(['date', 'store', 'item'])

def train_test_split(dataframe, train_size=0.9):
    index_df = input_df.index.levels[0]
    train_cnt = int(len(index_df) * train_size)
    pivot = index_df[train_cnt]
    train = dataframe.iloc[dataframe.index.get_level_values('date') <= pivot]
    test = dataframe.iloc[dataframe.index.get_level_values('date') > pivot]
    return (train, test)

train_df.index.levels[1:][0]
train_df.iloc[(train_df.index.get_level_values('store') == 1) & (train_df.index.get_level_values('item') == 1)]

def create_dataset(input_df, time_steps=1):
    Xs, ys = ([], [])
    stores = train_df.index.levels[1]
    items = train_df.index.levels[2]
    store_items = list(itertools.product(stores, items))
    for store, item in store_items:
        curr_df = input_df.iloc[(input_df.index.get_level_values('store') == store) & (input_df.index.get_level_values('item') == item)]
        for i in range(len(curr_df) - time_steps):
            v = curr_df.iloc[i:i + time_steps].values
            Xs.append(v)
            ys.append(curr_df.iloc[i + time_steps].values)
    return (np.array(Xs), np.array(ys))

time_steps = 10
X_train, y_train = create_dataset(train_df, time_steps)
X_test, y_test = create_dataset(test_df, time_steps)

model = keras.Sequential()
model.add(keras.layers.Bidirectional(keras.layers.LSTM(units=128, input_shape=(X_train.shape[1], X_train.shape[2]))))
model.add(keras.layers.Dropout(rate=0.2))
model.add(keras.layers.Dense(units=1))
model.compile(loss='mean_squared_error', optimizer='adam')

X_train = tf.cast(X_train, dtype='float64')
y_train = tf.cast(y_train, dtype='float64')
X_test = tf.cast(X_test, dtype='float64')
y_test = tf.cast(y_test, dtype='float64')
history = model.fit(X_train, y_train, epochs=30, batch_size=32, validation_split=0.1, shuffle=False)
code
90142889/cell_17
[ "text_html_output_1.png" ]
import itertools
import keras
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf

df = pd.read_csv('/kaggle/input/demand-forecast-abal2/demand_forecast.csv', parse_dates=['date'])
df
df.groupby(['store', 'item']).size()
input_df = df.set_index(['date', 'store', 'item'])

def train_test_split(dataframe, train_size=0.9):
    index_df = input_df.index.levels[0]
    train_cnt = int(len(index_df) * train_size)
    pivot = index_df[train_cnt]
    train = dataframe.iloc[dataframe.index.get_level_values('date') <= pivot]
    test = dataframe.iloc[dataframe.index.get_level_values('date') > pivot]
    return (train, test)

train_df.index.levels[1:][0]
train_df.iloc[(train_df.index.get_level_values('store') == 1) & (train_df.index.get_level_values('item') == 1)]

def create_dataset(input_df, time_steps=1):
    Xs, ys = ([], [])
    stores = train_df.index.levels[1]
    items = train_df.index.levels[2]
    store_items = list(itertools.product(stores, items))
    for store, item in store_items:
        curr_df = input_df.iloc[(input_df.index.get_level_values('store') == store) & (input_df.index.get_level_values('item') == item)]
        for i in range(len(curr_df) - time_steps):
            v = curr_df.iloc[i:i + time_steps].values
            Xs.append(v)
            ys.append(curr_df.iloc[i + time_steps].values)
    return (np.array(Xs), np.array(ys))

time_steps = 10
X_train, y_train = create_dataset(train_df, time_steps)
X_test, y_test = create_dataset(test_df, time_steps)

model = keras.Sequential()
model.add(keras.layers.Bidirectional(keras.layers.LSTM(units=128, input_shape=(X_train.shape[1], X_train.shape[2]))))
model.add(keras.layers.Dropout(rate=0.2))
model.add(keras.layers.Dense(units=1))
model.compile(loss='mean_squared_error', optimizer='adam')

X_train = tf.cast(X_train, dtype='float64')
y_train = tf.cast(y_train, dtype='float64')
X_test = tf.cast(X_test, dtype='float64')
y_test = tf.cast(y_test, dtype='float64')
history = model.fit(X_train, y_train, epochs=30, batch_size=32, validation_split=0.1, shuffle=False)

plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
code
90142889/cell_10
[ "text_html_output_1.png" ]
train_df.index.levels[1:][0]
train_df.iloc[(train_df.index.get_level_values('store') == 1) & (train_df.index.get_level_values('item') == 1)]
code
90142889/cell_12
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
import itertools
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/demand-forecast-abal2/demand_forecast.csv', parse_dates=['date'])
df
df.groupby(['store', 'item']).size()
input_df = df.set_index(['date', 'store', 'item'])

def train_test_split(dataframe, train_size=0.9):
    index_df = input_df.index.levels[0]
    train_cnt = int(len(index_df) * train_size)
    pivot = index_df[train_cnt]
    train = dataframe.iloc[dataframe.index.get_level_values('date') <= pivot]
    test = dataframe.iloc[dataframe.index.get_level_values('date') > pivot]
    return (train, test)

train_df.index.levels[1:][0]
train_df.iloc[(train_df.index.get_level_values('store') == 1) & (train_df.index.get_level_values('item') == 1)]

def create_dataset(input_df, time_steps=1):
    Xs, ys = ([], [])
    stores = train_df.index.levels[1]
    items = train_df.index.levels[2]
    store_items = list(itertools.product(stores, items))
    for store, item in store_items:
        curr_df = input_df.iloc[(input_df.index.get_level_values('store') == store) & (input_df.index.get_level_values('item') == item)]
        for i in range(len(curr_df) - time_steps):
            v = curr_df.iloc[i:i + time_steps].values
            Xs.append(v)
            ys.append(curr_df.iloc[i + time_steps].values)
    return (np.array(Xs), np.array(ys))

time_steps = 10
X_train, y_train = create_dataset(train_df, time_steps)
X_test, y_test = create_dataset(test_df, time_steps)
print(X_train.shape, y_train.shape)
code
90142889/cell_5
[ "text_plain_output_4.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/demand-forecast-abal2/demand_forecast.csv', parse_dates=['date'])
df
df.groupby(['store', 'item']).size()
code
121153356/cell_4
[ "text_plain_output_1.png" ]
dictionary = {'Doğan': 23, 'Efe': 22}
print(dictionary)
print(type(dictionary))
print(dictionary.values())
keys = dictionary.keys()
if 'Doğan' in keys:
    print('Yes')
else:
    print('No')
code
121153356/cell_2
[ "text_plain_output_1.png" ]
var1 = 10
var2 = 20
var3 = 30
list1 = [10, 20, 30]
type(list1)
print(list1[-1])
print(list1[0:2])
list1.append(40)
print(list1)
list1.remove(40)
print(list1)
list1.reverse()
print(list1)
list1.sort()
print(list1)
code
121153356/cell_1
[ "text_plain_output_1.png" ]
number = 5
day = 'Monday'
print(number)
code
121153356/cell_3
[ "text_plain_output_1.png" ]
def circle_perimeter(r, pi=3.14):
    result = 2 * pi * r
    return result

print(circle_perimeter(3))

def calculate(x):
    result = x * x
    return result

result = calculate(3)
print(result)

result2 = lambda x: x * x
print(result2(4))
code
2001660/cell_9
[ "text_plain_output_1.png" ]
from pathlib import Path
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

input_path = Path('../input')
train_path = input_path / 'train'
test_path = input_path / 'test'

cameras = os.listdir(train_path)
train_images = []
for camera in cameras:
    for fname in sorted(os.listdir(train_path / camera)):
        train_images.append((camera, fname))
train = pd.DataFrame(train_images, columns=['camera', 'fname'])
train.sample(5)

random_images = train.sample(100)
random_images[:5]
code
2001660/cell_23
[ "text_html_output_1.png" ]
from pathlib import Path
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

input_path = Path('../input')
train_path = input_path / 'train'
test_path = input_path / 'test'

cameras = os.listdir(train_path)
train_images = []
for camera in cameras:
    for fname in sorted(os.listdir(train_path / camera)):
        train_images.append((camera, fname))
train = pd.DataFrame(train_images, columns=['camera', 'fname'])
train.sample(5)

test_images = []
for fname in sorted(os.listdir(test_path)):
    test_images.append(fname)
test = pd.DataFrame(test_images, columns=['fname'])

random_images = train.sample(100)
random_images[:5]

cols = ['pca1', 'pca2']
for col in cols:
    train[col] = None
    test[col] = None
cols
code
2001660/cell_20
[ "text_plain_output_1.png" ]
from pathlib import Path
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

input_path = Path('../input')
train_path = input_path / 'train'
test_path = input_path / 'test'

cameras = os.listdir(train_path)
train_images = []
for camera in cameras:
    for fname in sorted(os.listdir(train_path / camera)):
        train_images.append((camera, fname))
train = pd.DataFrame(train_images, columns=['camera', 'fname'])
train.sample(5)

test_images = []
for fname in sorted(os.listdir(test_path)):
    test_images.append(fname)
test = pd.DataFrame(test_images, columns=['fname'])
test.sample(5)
code
2001660/cell_6
[ "text_plain_output_1.png" ]
from pathlib import Path
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

input_path = Path('../input')
train_path = input_path / 'train'
test_path = input_path / 'test'

cameras = os.listdir(train_path)
train_images = []
for camera in cameras:
    for fname in sorted(os.listdir(train_path / camera)):
        train_images.append((camera, fname))
train = pd.DataFrame(train_images, columns=['camera', 'fname'])
train.sample(5)

test_images = []
for fname in sorted(os.listdir(test_path)):
    test_images.append(fname)
test = pd.DataFrame(test_images, columns=['fname'])
print(test.shape)
test.head(5)
code
2001660/cell_11
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from pathlib import Path
from sklearn.decomposition import PCA
import cv2
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

input_path = Path('../input')
train_path = input_path / 'train'
test_path = input_path / 'test'

def get_center_crop(img, d=250):
    cy = img.shape[0] // 2
    cx = img.shape[1] // 2
    return img[cy - d:cy + d, cx - d:cx + d]

def get_blurring(img):
    return np.max(cv2.convertScaleAbs(cv2.Laplacian(img, 3)))

cameras = os.listdir(train_path)
train_images = []
for camera in cameras:
    for fname in sorted(os.listdir(train_path / camera)):
        train_images.append((camera, fname))
train = pd.DataFrame(train_images, columns=['camera', 'fname'])
train.sample(5)

n_components = 5
pca = PCA(n_components=n_components, svd_solver='randomized', whiten=True)

def get_pca_feature(img_channel):
    return pca.fit(img_channel)

random_images = train.sample(100)
random_images[:5]

img_set_reds = []
for i, r in random_images.iterrows():
    x = get_center_crop(cv2.imread('../input/train/' + r['camera'] + '/' + r['fname']))
    img_set_reds.append(np.ravel(x))
img_set_reds = np.asarray(img_set_reds)
pf = pca.fit(np.asarray(img_set_reds))
x = pf.transform(np.ravel(get_center_crop(cv2.imread('../input/train/' + r['camera'] + '/' + r['fname']))).reshape(1, -1))
x
code
2001660/cell_19
[ "text_plain_output_1.png" ]
from pathlib import Path
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

input_path = Path('../input')
train_path = input_path / 'train'
test_path = input_path / 'test'

cameras = os.listdir(train_path)
train_images = []
for camera in cameras:
    for fname in sorted(os.listdir(train_path / camera)):
        train_images.append((camera, fname))
train = pd.DataFrame(train_images, columns=['camera', 'fname'])
train.sample(5)

random_images = train.sample(100)
random_images[:5]
train.sample(5)
code
2001660/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import os
from pathlib import Path
import multiprocessing as mp
import numpy as np
import pandas as pd
from skimage.data import imread
from sklearn.ensemble import RandomForestClassifier
import time
import cv2
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
2001660/cell_22
[ "application_vnd.jupyter.stderr_output_1.png" ]
from pathlib import Path
from sklearn.ensemble import RandomForestClassifier
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

input_path = Path('../input')
train_path = input_path / 'train'
test_path = input_path / 'test'

cameras = os.listdir(train_path)
train_images = []
for camera in cameras:
    for fname in sorted(os.listdir(train_path / camera)):
        train_images.append((camera, fname))
train = pd.DataFrame(train_images, columns=['camera', 'fname'])
train.sample(5)

test_images = []
for fname in sorted(os.listdir(test_path)):
    test_images.append(fname)
test = pd.DataFrame(test_images, columns=['fname'])

random_images = train.sample(100)
random_images[:5]

cols = ['pca1', 'pca2']
for col in cols:
    train[col] = None
    test[col] = None
train.sample(5)
test.sample(5)

def get_random_forest_clf(df_train, cols):
    y = df_train['camera'].values
    X_train = df_train[cols].values
    return RandomForestClassifier(n_estimators=200).fit(train, y)

y = train['camera'].values
X_train = train[cols].values
X_test = test[cols].values
clf = RandomForestClassifier(n_estimators=5)
clf.fit(X_train, y)
code
2001660/cell_10
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from pathlib import Path
from sklearn.decomposition import PCA
import cv2
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

input_path = Path('../input')
train_path = input_path / 'train'
test_path = input_path / 'test'

def get_center_crop(img, d=250):
    cy = img.shape[0] // 2
    cx = img.shape[1] // 2
    return img[cy - d:cy + d, cx - d:cx + d]

def get_blurring(img):
    return np.max(cv2.convertScaleAbs(cv2.Laplacian(img, 3)))

cameras = os.listdir(train_path)
train_images = []
for camera in cameras:
    for fname in sorted(os.listdir(train_path / camera)):
        train_images.append((camera, fname))
train = pd.DataFrame(train_images, columns=['camera', 'fname'])
train.sample(5)

n_components = 5
pca = PCA(n_components=n_components, svd_solver='randomized', whiten=True)

def get_pca_feature(img_channel):
    return pca.fit(img_channel)

random_images = train.sample(100)
random_images[:5]

img_set_reds = []
for i, r in random_images.iterrows():
    x = get_center_crop(cv2.imread('../input/train/' + r['camera'] + '/' + r['fname']))
    img_set_reds.append(np.ravel(x))
img_set_reds = np.asarray(img_set_reds)
print(img_set_reds.shape)
print([img_set_reds[i].shape for i in range(10)])
pf = pca.fit(np.asarray(img_set_reds))
code
2001660/cell_12
[ "text_html_output_1.png" ]
from pathlib import Path
from sklearn.decomposition import PCA
import cv2
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

input_path = Path('../input')
train_path = input_path / 'train'
test_path = input_path / 'test'

def get_center_crop(img, d=250):
    cy = img.shape[0] // 2
    cx = img.shape[1] // 2
    return img[cy - d:cy + d, cx - d:cx + d]

def get_blurring(img):
    return np.max(cv2.convertScaleAbs(cv2.Laplacian(img, 3)))

cameras = os.listdir(train_path)
train_images = []
for camera in cameras:
    for fname in sorted(os.listdir(train_path / camera)):
        train_images.append((camera, fname))
train = pd.DataFrame(train_images, columns=['camera', 'fname'])
train.sample(5)

n_components = 5
pca = PCA(n_components=n_components, svd_solver='randomized', whiten=True)

def get_pca_feature(img_channel):
    return pca.fit(img_channel)

random_images = train.sample(100)
random_images[:5]

img_set_reds = []
for i, r in random_images.iterrows():
    x = get_center_crop(cv2.imread('../input/train/' + r['camera'] + '/' + r['fname']))
    img_set_reds.append(np.ravel(x))
img_set_reds = np.asarray(img_set_reds)
pf = pca.fit(np.asarray(img_set_reds))
x = pf.transform(np.ravel(get_center_crop(cv2.imread('../input/train/' + r['camera'] + '/' + r['fname']))).reshape(1, -1))
x
t = get_pca_features(get_center_crop(cv2.imread('../input/train/' + r['camera'] + '/' + r['fname'])))
t[0][1]
code
2001660/cell_5
[ "text_html_output_1.png" ]
from pathlib import Path
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

input_path = Path('../input')
train_path = input_path / 'train'
test_path = input_path / 'test'

cameras = os.listdir(train_path)
train_images = []
for camera in cameras:
    for fname in sorted(os.listdir(train_path / camera)):
        train_images.append((camera, fname))
train = pd.DataFrame(train_images, columns=['camera', 'fname'])
print(train.shape)
train.sample(5)
code
128043510/cell_21
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression

lr = LinearRegression()
lr.fit(X_train, Y_train)
predictions = lr.predict(X_test)
print('Actual value of the house:- ', Y_test[0])
print('model Predicted values:- ', predictions[0])
code
128043510/cell_13
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from sklearn.datasets import load_boston

load_boston = load_boston()
x = load_boston.data
y = load_boston.target
data = pd.DataFrame(x, columns=load_boston.feature_names)
data['SalePrice'] = y
data.isnull().sum()
plt.tight_layout()

fig, ax = plt.subplots()
ax.scatter(x = data["CRIM"], y = data["SalePrice"])
plt.ylabel("SalesPrice", fontsize = 13)
plt.xlabel("CRIM", fontsize = 13)
plt.show()

fig, ax = plt.subplots()
ax.scatter(x=data['AGE'], y=data['SalePrice'])
plt.ylabel('SalesPrice', fontsize=13)
plt.xlabel('AGE', fontsize=13)
plt.show()
code
128043510/cell_9
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from sklearn.datasets import load_boston

load_boston = load_boston()
x = load_boston.data
y = load_boston.target
data = pd.DataFrame(x, columns=load_boston.feature_names)
data['SalePrice'] = y
data.isnull().sum()

sns.pairplot(data, height=2.5)
plt.tight_layout()
code
128043510/cell_4
[ "image_output_1.png" ]
from sklearn.datasets import load_boston
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import load_boston

load_boston = load_boston()
x = load_boston.data
y = load_boston.target
data = pd.DataFrame(x, columns=load_boston.feature_names)
data['SalePrice'] = y
print(data.shape)
code
128043510/cell_20
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression

lr = LinearRegression()
lr.fit(X_train, Y_train)
code
128043510/cell_6
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import load_boston

load_boston = load_boston()
x = load_boston.data
y = load_boston.target
data = pd.DataFrame(x, columns=load_boston.feature_names)
data['SalePrice'] = y
data.describe()
code
128043510/cell_2
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn.datasets import load_boston
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import load_boston

load_boston = load_boston()
x = load_boston.data
y = load_boston.target
data = pd.DataFrame(x, columns=load_boston.feature_names)
print(data)
data['SalePrice'] = y
data.head()
code
128043510/cell_11
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import load_boston

load_boston = load_boston()
x = load_boston.data
y = load_boston.target
data = pd.DataFrame(x, columns=load_boston.feature_names)
data['SalePrice'] = y
data.isnull().sum()
print('Skewness: %f' % data['SalePrice'].skew())
print('Kurtosis: %f' % data['SalePrice'].kurt())
code
128043510/cell_19
[ "image_output_1.png" ]
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)
code
128043510/cell_8
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import load_boston

load_boston = load_boston()
x = load_boston.data
y = load_boston.target
data = pd.DataFrame(x, columns=load_boston.feature_names)
data['SalePrice'] = y
data.isnull().sum()
code
128043510/cell_16
[ "text_plain_output_1.png" ]
from scipy import stats
from scipy.stats import norm, skew # for some statistics
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from sklearn.datasets import load_boston

load_boston = load_boston()
x = load_boston.data
y = load_boston.target
data = pd.DataFrame(x, columns=load_boston.feature_names)
data['SalePrice'] = y
data.isnull().sum()
plt.tight_layout()

fig, ax = plt.subplots()
ax.scatter(x = data["CRIM"], y = data["SalePrice"])
plt.ylabel("SalesPrice", fontsize = 13)
plt.xlabel("CRIM", fontsize = 13)
plt.show()

fig, ax = plt.subplots()
ax.scatter(x = data["AGE"], y = data["SalePrice"])
plt.ylabel("SalesPrice", fontsize = 13)
plt.xlabel("AGE", fontsize = 13)
plt.show()

from scipy import stats
from scipy.stats import norm, skew # for some statistics

sns.distplot(data["SalePrice"], fit = norm)
(mu, sigma) = norm.fit(data["SalePrice"])
print("\n mu = {:.2f} and sigma = {:.2f}\n".format(mu, sigma))
plt.legend(["Normal dist. ($\mu=$ {:.2f})".format(mu, sigma)],loc = "best") #loc is mean and scale is standard deviation
plt.ylabel("Frequency")
plt.title("Saleprice distribution")

# Get also the QQ - plot quardrant quartiles
fig = plt.figure()
res = stats.probplot(data["SalePrice"], plot = plt)
plt.show()

plt.figure(figsize=(10, 10))
cor = data.corr()
sns.heatmap(cor, annot=True, cmap=plt.cm.PuBu)
plt.show()
code
128043510/cell_3
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.datasets import load_boston
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import load_boston

load_boston = load_boston()
x = load_boston.data
y = load_boston.target
data = pd.DataFrame(x, columns=load_boston.feature_names)
data['SalePrice'] = y
print(load_boston.DESCR)
code
128043510/cell_14
[ "text_html_output_1.png" ]
from scipy import stats
from scipy.stats import norm, skew # for some statistics
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from sklearn.datasets import load_boston

load_boston = load_boston()
x = load_boston.data
y = load_boston.target
data = pd.DataFrame(x, columns=load_boston.feature_names)
data['SalePrice'] = y
data.isnull().sum()
plt.tight_layout()

fig, ax = plt.subplots()
ax.scatter(x = data["CRIM"], y = data["SalePrice"])
plt.ylabel("SalesPrice", fontsize = 13)
plt.xlabel("CRIM", fontsize = 13)
plt.show()

fig, ax = plt.subplots()
ax.scatter(x = data["AGE"], y = data["SalePrice"])
plt.ylabel("SalesPrice", fontsize = 13)
plt.xlabel("AGE", fontsize = 13)
plt.show()

from scipy import stats
from scipy.stats import norm, skew

sns.distplot(data['SalePrice'], fit=norm)
mu, sigma = norm.fit(data['SalePrice'])
print('\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
plt.legend(['Normal dist. ($\\mu=$ {:.2f})'.format(mu, sigma)], loc='best')
plt.ylabel('Frequency')
plt.title('Saleprice distribution')

fig = plt.figure()
res = stats.probplot(data['SalePrice'], plot=plt)
plt.show()
code
128043510/cell_22
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import numpy as np # linear algebra
from sklearn.linear_model import LinearRegression

lr = LinearRegression()
lr.fit(X_train, Y_train)
predictions = lr.predict(X_test)

from sklearn.metrics import mean_squared_error
mse = mean_squared_error(Y_test, predictions)
rmse = np.sqrt(mse)
print(rmse)
code
128043510/cell_10
[ "application_vnd.jupyter.stderr_output_2.png", "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from sklearn.datasets import load_boston

load_boston = load_boston()
x = load_boston.data
y = load_boston.target
data = pd.DataFrame(x, columns=load_boston.feature_names)
data['SalePrice'] = y
data.isnull().sum()
plt.tight_layout()

sns.distplot(data['SalePrice'])
code
128043510/cell_12
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from sklearn.datasets import load_boston

load_boston = load_boston()
x = load_boston.data
y = load_boston.target
data = pd.DataFrame(x, columns=load_boston.feature_names)
data['SalePrice'] = y
data.isnull().sum()
plt.tight_layout()

fig, ax = plt.subplots()
ax.scatter(x=data['CRIM'], y=data['SalePrice'])
plt.ylabel('SalesPrice', fontsize=13)
plt.xlabel('CRIM', fontsize=13)
plt.show()
code
128043510/cell_5
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_boston
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import load_boston

load_boston = load_boston()
x = load_boston.data
y = load_boston.target
data = pd.DataFrame(x, columns=load_boston.feature_names)
data['SalePrice'] = y
data.info()
code
17132944/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from gensim.models import KeyedVectors
from gensim.models import Word2Vec
from nltk.tokenize import RegexpTokenizer
import pandas as pd
import gensim
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
import pandas as pd
from nltk.tokenize import RegexpTokenizer

forum_posts = pd.read_csv('../input/meta-kaggle/ForumMessages.csv')
sentences = forum_posts.Message.astype('str').tolist()
tokenizer = RegexpTokenizer('\\w+')
sentences_tokenized = [w.lower() for w in sentences]
sentences_tokenized = [tokenizer.tokenize(i) for i in sentences_tokenized]

model = KeyedVectors.load_word2vec_format('../input/word2vec-google/GoogleNews-vectors-negative300.bin', binary=True)

model_2 = Word2Vec(size=300, min_count=1)
model_2.build_vocab(sentences_tokenized)
total_examples = model_2.corpus_count
model_2.build_vocab([list(model.vocab.keys())], update=True)
model_2.intersect_word2vec_format('../input/word2vec-google/GoogleNews-vectors-negative300.bin', binary=True)
model_2.train(sentences_tokenized, total_examples=total_examples, epochs=model_2.iter)

model.similarity('kaggle', 'google')
code
17132944/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from gensim.models import KeyedVectors
from gensim.models import Word2Vec
from nltk.tokenize import RegexpTokenizer
import pandas as pd
import gensim
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
import pandas as pd
from nltk.tokenize import RegexpTokenizer

forum_posts = pd.read_csv('../input/meta-kaggle/ForumMessages.csv')
sentences = forum_posts.Message.astype('str').tolist()
tokenizer = RegexpTokenizer('\\w+')
sentences_tokenized = [w.lower() for w in sentences]
sentences_tokenized = [tokenizer.tokenize(i) for i in sentences_tokenized]

model = KeyedVectors.load_word2vec_format('../input/word2vec-google/GoogleNews-vectors-negative300.bin', binary=True)

model_2 = Word2Vec(size=300, min_count=1)
model_2.build_vocab(sentences_tokenized)
total_examples = model_2.corpus_count
model_2.build_vocab([list(model.vocab.keys())], update=True)
model_2.intersect_word2vec_format('../input/word2vec-google/GoogleNews-vectors-negative300.bin', binary=True)
model_2.train(sentences_tokenized, total_examples=total_examples, epochs=model_2.iter)

model_2.similarity('kaggle', 'google')
code
17132944/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from gensim.models import KeyedVectors
from gensim.models import Word2Vec
from nltk.tokenize import RegexpTokenizer
import pandas as pd
import gensim
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
import pandas as pd
from nltk.tokenize import RegexpTokenizer

forum_posts = pd.read_csv('../input/meta-kaggle/ForumMessages.csv')
sentences = forum_posts.Message.astype('str').tolist()
tokenizer = RegexpTokenizer('\\w+')
sentences_tokenized = [w.lower() for w in sentences]
sentences_tokenized = [tokenizer.tokenize(i) for i in sentences_tokenized]

model = KeyedVectors.load_word2vec_format('../input/word2vec-google/GoogleNews-vectors-negative300.bin', binary=True)

model_2 = Word2Vec(size=300, min_count=1)
model_2.build_vocab(sentences_tokenized)
total_examples = model_2.corpus_count
model_2.build_vocab([list(model.vocab.keys())], update=True)
model_2.intersect_word2vec_format('../input/word2vec-google/GoogleNews-vectors-negative300.bin', binary=True)
model_2.train(sentences_tokenized, total_examples=total_examples, epochs=model_2.iter)
code
2016332/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as pyo

data_df = pd.read_csv('../input/mushrooms.csv')
data_df['y'] = data_df['class'].map({'p': 1, 'e': 0})
feature_columns = [c for c in data_df.columns if not c in ('class', 'y')]

stats_df = []
single_val_c = {}
for i, c in enumerate(feature_columns):
    if data_df[c].nunique() == 1:
        single_val_c[c] = data_df[c].unique()[0]
        continue
    gb = data_df.groupby(c)
    m = gb['y'].mean()
    s = gb.size()
    df = pd.DataFrame(index=range(len(m)))
    df['col'] = c
    df['val'] = m.index.values
    df['positive_percentage'] = m.values
    df['quantity_percentage'] = s.values / s.sum()
    stats_df.append(df)
    trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage')
    trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage')
    layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage'))
    fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout)
stats_df = pd.concat(stats_df, axis=0)

for c in single_val_c.keys():
    print('The column %s only has one unique value with %r.' % (c, single_val_c[c]))
    print('It does work for the classification, which will be removed.')
    feature_columns.remove(c)
code
2016332/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

data_df = pd.read_csv('../input/mushrooms.csv')
data_df.info()
code
2016332/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as pyo

data_df = pd.read_csv('../input/mushrooms.csv')
data_df['y'] = data_df['class'].map({'p': 1, 'e': 0})
feature_columns = [c for c in data_df.columns if not c in ('class', 'y')]

stats_df = []
single_val_c = {}
for i, c in enumerate(feature_columns):
    if data_df[c].nunique() == 1:
        single_val_c[c] = data_df[c].unique()[0]
        continue
    gb = data_df.groupby(c)
    m = gb['y'].mean()
    s = gb.size()
    df = pd.DataFrame(index=range(len(m)))
    df['col'] = c
    df['val'] = m.index.values
    df['positive_percentage'] = m.values
    df['quantity_percentage'] = s.values / s.sum()
    stats_df.append(df)
    trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage')
    trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage')
    layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage'))
    fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout)
    pyo.iplot(fig)
stats_df = pd.concat(stats_df, axis=0)
code
2016332/cell_18
[ "text_html_output_1.png" ]
from sklearn.linear_model import RidgeClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder, PolynomialFeatures
from time import time
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as pyo

data_df = pd.read_csv('../input/mushrooms.csv')
data_df['y'] = data_df['class'].map({'p': 1, 'e': 0})
feature_columns = [c for c in data_df.columns if not c in ('class', 'y')]

stats_df = []
single_val_c = {}
for i, c in enumerate(feature_columns):
    if data_df[c].nunique() == 1:
        single_val_c[c] = data_df[c].unique()[0]
        continue
    gb = data_df.groupby(c)
    m = gb['y'].mean()
    s = gb.size()
    df = pd.DataFrame(index=range(len(m)))
    df['col'] = c
    df['val'] = m.index.values
    df['positive_percentage'] = m.values
    df['quantity_percentage'] = s.values / s.sum()
    stats_df.append(df)
    trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage')
    trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage')
    layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage'))
    fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout)
stats_df = pd.concat(stats_df, axis=0)

for c in single_val_c.keys():
    feature_columns.remove(c)

data_all = pd.get_dummies(data=data_df, columns=x_columns, prefix=x_columns).drop(list(single_val_c.keys()) + ['class'], axis=1)

def ridge_model(X_train, y_train):
    r_c = Pipeline([('poly', PolynomialFeatures(interaction_only=True)), ('clf', RidgeClassifier())])
    params_pool = {'poly__degree': [2, 3], 'clf__alpha': np.logspace(-1, 3, num=6)}
    gs_c = GridSearchCV(r_c, param_grid=params_pool, n_jobs=-1, cv=4)
    gs_c.fit(X_train, y_train)
    return gs_c

def do_model_train(model_name, X_train, y_train, X_test, y_test):
    bg = time()
    if 'Ridge' == model_name:
        model = ridge_model(X_train, y_train)
    y_hat = model.predict(X_train)
    y_hat = model.predict(X_test)

X_all, y_all = (data_all[[c for c in data_all.columns if c != 'y']], data_all['y'])
X_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size=0.33, random_state=1)
do_model_train('Ridge', X_train, y_train, X_test, y_test)
code
2016332/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as pyo

data_df = pd.read_csv('../input/mushrooms.csv')
data_df['y'] = data_df['class'].map({'p': 1, 'e': 0})
feature_columns = [c for c in data_df.columns if not c in ('class', 'y')]

stats_df = []
single_val_c = {}
for i, c in enumerate(feature_columns):
    if data_df[c].nunique() == 1:
        single_val_c[c] = data_df[c].unique()[0]
        continue
    gb = data_df.groupby(c)
    m = gb['y'].mean()
    s = gb.size()
    df = pd.DataFrame(index=range(len(m)))
    df['col'] = c
    df['val'] = m.index.values
    df['positive_percentage'] = m.values
    df['quantity_percentage'] = s.values / s.sum()
    stats_df.append(df)
    trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage')
    trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage')
    layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage'))
    fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout)
stats_df = pd.concat(stats_df, axis=0)
stats_df.describe()
code
2016332/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as pyo

data_df = pd.read_csv('../input/mushrooms.csv')
data_df['y'] = data_df['class'].map({'p': 1, 'e': 0})
feature_columns = [c for c in data_df.columns if not c in ('class', 'y')]

stats_df = []
single_val_c = {}
for i, c in enumerate(feature_columns):
    if data_df[c].nunique() == 1:
        single_val_c[c] = data_df[c].unique()[0]
        continue
    gb = data_df.groupby(c)
    m = gb['y'].mean()
    s = gb.size()
    df = pd.DataFrame(index=range(len(m)))
    df['col'] = c
    df['val'] = m.index.values
    df['positive_percentage'] = m.values
    df['quantity_percentage'] = s.values / s.sum()
    stats_df.append(df)
    trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage')
    trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage')
    layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage'))
    fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout)
stats_df = pd.concat(stats_df, axis=0)

for c in single_val_c.keys():
    feature_columns.remove(c)

data_all = pd.get_dummies(data=data_df, columns=x_columns, prefix=x_columns).drop(list(single_val_c.keys()) + ['class'], axis=1)
data_all.head()
code
2016332/cell_3
[ "text_html_output_10.png", "text_html_output_16.png", "text_html_output_4.png", "text_html_output_6.png", "text_html_output_2.png", "text_html_output_15.png", "text_html_output_5.png", "text_html_output_14.png", "text_html_output_19.png", "text_html_output_9.png", "text_html_output_13.png", "text_html_output_20.png", "text_html_output_21.png", "text_html_output_1.png", "text_html_output_17.png", "text_html_output_18.png", "text_html_output_12.png", "text_html_output_11.png", "text_html_output_8.png", "text_html_output_3.png", "text_html_output_7.png" ]
from subprocess import check_output

np.set_printoptions(suppress=True, linewidth=300)
pd.options.display.float_format = lambda x: '%0.6f' % x
pyo.init_notebook_mode(connected=True)
print(check_output(['ls', '../input']).decode('utf-8'))
code
2016332/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as pyo

data_df = pd.read_csv('../input/mushrooms.csv')
data_df['y'] = data_df['class'].map({'p': 1, 'e': 0})
feature_columns = [c for c in data_df.columns if not c in ('class', 'y')]

stats_df = []
single_val_c = {}
for i, c in enumerate(feature_columns):
    if data_df[c].nunique() == 1:
        single_val_c[c] = data_df[c].unique()[0]
        continue
    gb = data_df.groupby(c)
    m = gb['y'].mean()
    s = gb.size()
    df = pd.DataFrame(index=range(len(m)))
    df['col'] = c
    df['val'] = m.index.values
    df['positive_percentage'] = m.values
    df['quantity_percentage'] = s.values / s.sum()
    stats_df.append(df)
    trace_prate = go.Bar(x=df['val'], y=df['positive_percentage'] * 100, name='positive percentage')
    trace_cnt = go.Bar(x=df['val'], y=df['quantity_percentage'] * 100, name='quantity percentage')
    layout = go.Layout(xaxis=dict(title=c), yaxis=dict(title='positive and quantity percentage'))
    fig = go.Figure(data=[trace_prate, trace_cnt], layout=layout)
stats_df = pd.concat(stats_df, axis=0)

for c in single_val_c.keys():
    feature_columns.remove(c)

data_all = pd.get_dummies(data=data_df, columns=x_columns, prefix=x_columns).drop(list(single_val_c.keys()) + ['class'], axis=1)
data_all.info()
code
2016332/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

data_df = pd.read_csv('../input/mushrooms.csv')
data_df.head()
code
16116423/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sample_sub = pd.read_csv('../input/sample_submission.csv')

from copy import copy
X_train = train.drop(columns=['scalar_coupling_constant']).copy()
y_train = train['scalar_coupling_constant'].copy()
X_test = test.copy()
X_train = X_train.drop(columns=['id'])
X_test = X_test.drop(columns=['id'])
print(f'X_train.shape: {X_train.shape}')
print(f'X_test.shape: {X_test.shape}')
code
16116423/cell_1
[ "text_plain_output_1.png" ]
# This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) !pip install category_encoders !pip install LightGBM import category_encoders as ce import lightgbm as lgbm # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory import os print(os.listdir("../input")) # Any results you write to the current directory are saved as output.
code
16116423/cell_10
[ "text_plain_output_1.png" ]
from sklearn.model_selection import KFold
import lightgbm as lgbm
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sample_sub = pd.read_csv('../input/sample_submission.csv')

from copy import copy
X_train = train.drop(columns=['scalar_coupling_constant']).copy()
y_train = train['scalar_coupling_constant'].copy()
X_test = test.copy()
X_train = X_train.drop(columns=['id'])
X_test = X_test.drop(columns=['id'])

from sklearn.model_selection import KFold
from sklearn.metrics import mean_absolute_error as mae

kf = KFold(n_splits=5)
fold = 0
for train_index, val_index in kf.split(X_train):
    fold += 1
    lgbm_model = lgbm.LGBMRegressor()
    lgbm_model.fit(X_train.loc[train_index, :], y_train[train_index])
    y_val = lgbm_model.predict(X_train.loc[val_index, :])
    print(f'fold{fold} score: {mae(y_train[val_index], y_val)}')
code
16116423/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sample_sub = pd.read_csv('../input/sample_submission.csv')

from copy import copy
X_train = train.drop(columns=['scalar_coupling_constant']).copy()
y_train = train['scalar_coupling_constant'].copy()
X_test = test.copy()
print(f'X_train.shape: {X_train.shape}')
print(f'X_test.shape: {X_test.shape}')
code
122263646/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import setuptools
from transformers import pipeline
import pandas as pd
code
122263646/cell_1
[ "text_plain_output_1.png" ]
#!pip install transformers==2.10.0 #!pip install simpletransformers !pip install pyopenssl
code
122263646/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/large-quotes/Quotes_Large.csv')
data.head()
code
16121779/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df.head()
code
16121779/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df['Year']
code
16121779/cell_2
[ "text_plain_output_1.png" ]
import os
import os
import numpy as np
import pandas as pd
import os
import numpy as np
import pandas as pd
import os

print(os.listdir('../input'))
code
16121779/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df.columns
df['Methods'].nunique()
code
16121779/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

print(os.listdir('../input'))
code