Columns:
  path              stringlengths    13–17
  screenshot_names  sequencelengths  1–873
  code              stringlengths    0–40.4k
  cell_type         stringclasses    1 value
34147773/cell_42
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans
import pandas as pd

metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
example_df = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/target_tables/2_relevant_factors/How does temperature and humidity affect the transmission of 2019-nCoV.csv')
example_shas = []
example_uids = []
for index, row in example_df.iterrows():
    study_title = row['Study']
    study_metadata = metadata_df[metadata_df['title'] == study_title]
    if len(study_metadata) != 0:
        sha = study_metadata.iloc[0]['sha']
        uid = study_metadata.iloc[0].name
        if str(sha) != 'nan':
            example_shas.append(sha)
            example_uids.append(uid)
unique_example_uids = set(example_uids)
len(unique_example_uids)
embeddings_df = pd.read_csv('../input/CORD-19-research-challenge/cord_19_embeddings_4_24/cord_19_embeddings_4_24.csv', header=None, index_col=0)
available_uids = unique_example_uids.intersection(embeddings_df.index)
example_embeddings_df = embeddings_df.loc[available_uids]
feature_pop_means = embeddings_df.mean(0)
# `p_vals` is assumed to be defined in an earlier cell that this record does not
# include; the mask keeps only dimensions significant at a Bonferroni-corrected level.
informative_embeddings_df = embeddings_df.loc[:, p_vals < 0.05 / len(p_vals)]
clustering = KMeans(n_clusters=10, random_state=0).fit(informative_embeddings_df.values)
labels = clustering.labels_
uid_cluster_map = dict(zip(informative_embeddings_df.index, labels))
for i in range(10):  # KMeans labels run 0..9; the original `range(1, 11)` skipped cluster 0
    print(i)
    cluster_ids = set([k for k, v in uid_cluster_map.items() if v == i])
    cluster_ids = cluster_ids.intersection(metadata_df.index)
    for ele in metadata_df.loc[cluster_ids, 'title']:
        if isinstance(ele, str):
            if ('corona' in ele.lower() or 'cov' in ele.lower()) and ('humid' in ele.lower() or 'temperature' in ele.lower()):
                print('\t', ele)
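# A plausible reconstruction of the missing `p_vals` computation, sketched as a
# comment because the original cell is not part of this record (an assumption:
# per-dimension one-sample t-tests of the example embeddings against the
# population means computed above, not necessarily the author's exact test):
#
#   from scipy.stats import ttest_1samp
#   _, p_vals = ttest_1samp(example_embeddings_df.values, feature_pop_means.values, axis=0)
#
# This yields one p-value per embedding dimension, so `p_vals < 0.05 / len(p_vals)`
# selects the dimensions where the example papers differ significantly from the corpus.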
code
34147773/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd

metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
example_df = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/target_tables/2_relevant_factors/How does temperature and humidity affect the transmission of 2019-nCoV.csv')
example_shas = []
example_uids = []
for index, row in example_df.iterrows():
    study_title = row['Study']
    study_metadata = metadata_df[metadata_df['title'] == study_title]
    if len(study_metadata) != 0:
        sha = study_metadata.iloc[0]['sha']
        uid = study_metadata.iloc[0].name
        if str(sha) != 'nan':
            example_shas.append(sha)
            example_uids.append(uid)
unique_example_uids = set(example_uids)
len(unique_example_uids)
embeddings_df = pd.read_csv('../input/CORD-19-research-challenge/cord_19_embeddings_4_24/cord_19_embeddings_4_24.csv', header=None, index_col=0)
available_uids = unique_example_uids.intersection(embeddings_df.index)
example_embeddings_df = embeddings_df.loc[available_uids]
example_embeddings_df
code
34147773/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
example_df = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/target_tables/2_relevant_factors/How does temperature and humidity affect the transmission of 2019-nCoV.csv')
example_shas = []
example_uids = []
for index, row in example_df.iterrows():
    study_title = row['Study']
    study_metadata = metadata_df[metadata_df['title'] == study_title]
    if len(study_metadata) != 0:
        sha = study_metadata.iloc[0]['sha']
        uid = study_metadata.iloc[0].name
        if str(sha) != 'nan':
            example_shas.append(sha)
            example_uids.append(uid)
unique_example_uids = set(example_uids)
len(unique_example_uids)
embeddings_df = pd.read_csv('../input/CORD-19-research-challenge/cord_19_embeddings_4_24/cord_19_embeddings_4_24.csv', header=None, index_col=0)
available_uids = unique_example_uids.intersection(embeddings_df.index)
example_embeddings_df = embeddings_df.loc[available_uids]
for i in range(1, 21, 2):
    plt.scatter(embeddings_df[i], embeddings_df[i + 1])
    plt.scatter(example_embeddings_df[i], example_embeddings_df[i + 1])
    plt.show()
code
34147773/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
code
34147773/cell_40
[ "text_html_output_1.png" ]
from sklearn.cluster import KMeans
import pandas as pd

metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
example_df = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/target_tables/2_relevant_factors/How does temperature and humidity affect the transmission of 2019-nCoV.csv')
example_shas = []
example_uids = []
for index, row in example_df.iterrows():
    study_title = row['Study']
    study_metadata = metadata_df[metadata_df['title'] == study_title]
    if len(study_metadata) != 0:
        sha = study_metadata.iloc[0]['sha']
        uid = study_metadata.iloc[0].name
        if str(sha) != 'nan':
            example_shas.append(sha)
            example_uids.append(uid)
unique_example_uids = set(example_uids)
len(unique_example_uids)
embeddings_df = pd.read_csv('../input/CORD-19-research-challenge/cord_19_embeddings_4_24/cord_19_embeddings_4_24.csv', header=None, index_col=0)
available_uids = unique_example_uids.intersection(embeddings_df.index)
example_embeddings_df = embeddings_df.loc[available_uids]
feature_pop_means = embeddings_df.mean(0)
# `p_vals` is assumed from an earlier cell (see the sketch under 34147773/cell_42)
informative_embeddings_df = embeddings_df.loc[:, p_vals < 0.05 / len(p_vals)]
clustering = KMeans(n_clusters=10, random_state=0).fit(informative_embeddings_df.values)
labels = clustering.labels_
uid_cluster_map = dict(zip(informative_embeddings_df.index, labels))
example_clusters = [uid_cluster_map[uid] for uid in example_embeddings_df.index]
example_clusters
code
34147773/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
metadata_df
code
34147773/cell_32
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
import pandas as pd

metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
example_df = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/target_tables/2_relevant_factors/How does temperature and humidity affect the transmission of 2019-nCoV.csv')
example_shas = []
example_uids = []
for index, row in example_df.iterrows():
    study_title = row['Study']
    study_metadata = metadata_df[metadata_df['title'] == study_title]
    if len(study_metadata) != 0:
        sha = study_metadata.iloc[0]['sha']
        uid = study_metadata.iloc[0].name
        if str(sha) != 'nan':
            example_shas.append(sha)
            example_uids.append(uid)
unique_example_uids = set(example_uids)
len(unique_example_uids)
embeddings_df = pd.read_csv('../input/CORD-19-research-challenge/cord_19_embeddings_4_24/cord_19_embeddings_4_24.csv', header=None, index_col=0)
available_uids = unique_example_uids.intersection(embeddings_df.index)
example_embeddings_df = embeddings_df.loc[available_uids]
feature_pop_means = embeddings_df.mean(0)
# `p_vals` is assumed from an earlier cell (see the sketch under 34147773/cell_42)
informative_embeddings_df = embeddings_df.loc[:, p_vals < 0.05 / len(p_vals)]
informative_embeddings_df
code
34147773/cell_28
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
example_df = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/target_tables/2_relevant_factors/How does temperature and humidity affect the transmission of 2019-nCoV.csv')
example_shas = []
example_uids = []
for index, row in example_df.iterrows():
    study_title = row['Study']
    study_metadata = metadata_df[metadata_df['title'] == study_title]
    if len(study_metadata) != 0:
        sha = study_metadata.iloc[0]['sha']
        uid = study_metadata.iloc[0].name
        if str(sha) != 'nan':
            example_shas.append(sha)
            example_uids.append(uid)
unique_example_uids = set(example_uids)
len(unique_example_uids)
embeddings_df = pd.read_csv('../input/CORD-19-research-challenge/cord_19_embeddings_4_24/cord_19_embeddings_4_24.csv', header=None, index_col=0)
available_uids = unique_example_uids.intersection(embeddings_df.index)
example_embeddings_df = embeddings_df.loc[available_uids]
# `p_vals` is assumed from an earlier cell (see the sketch under 34147773/cell_42)
plt.bar(range(len(p_vals)), -np.log(p_vals))
plt.hlines(-np.log(0.05), 0, 800)
plt.hlines(-np.log(0.05 / len(p_vals)), 0, 800)
code
34147773/cell_15
[ "text_html_output_1.png" ]
import pandas as pd

metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
example_df = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/target_tables/2_relevant_factors/How does temperature and humidity affect the transmission of 2019-nCoV.csv')
example_shas = []
example_uids = []
for index, row in example_df.iterrows():
    study_title = row['Study']
    study_metadata = metadata_df[metadata_df['title'] == study_title]
    if len(study_metadata) != 0:
        sha = study_metadata.iloc[0]['sha']
        uid = study_metadata.iloc[0].name
        if str(sha) != 'nan':
            example_shas.append(sha)
            example_uids.append(uid)
example_uids
code
34147773/cell_17
[ "text_html_output_1.png" ]
import pandas as pd

metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
example_df = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/target_tables/2_relevant_factors/How does temperature and humidity affect the transmission of 2019-nCoV.csv')
example_shas = []
example_uids = []
for index, row in example_df.iterrows():
    study_title = row['Study']
    study_metadata = metadata_df[metadata_df['title'] == study_title]
    if len(study_metadata) != 0:
        sha = study_metadata.iloc[0]['sha']
        uid = study_metadata.iloc[0].name
        if str(sha) != 'nan':
            example_shas.append(sha)
            example_uids.append(uid)
unique_example_uids = set(example_uids)
len(unique_example_uids)
code
34147773/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
example_df = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/target_tables/2_relevant_factors/How does temperature and humidity affect the transmission of 2019-nCoV.csv')
example_df
code
34147773/cell_36
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.cluster import KMeans
import collections
import pandas as pd

metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
example_df = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/target_tables/2_relevant_factors/How does temperature and humidity affect the transmission of 2019-nCoV.csv')
example_shas = []
example_uids = []
for index, row in example_df.iterrows():
    study_title = row['Study']
    study_metadata = metadata_df[metadata_df['title'] == study_title]
    if len(study_metadata) != 0:
        sha = study_metadata.iloc[0]['sha']
        uid = study_metadata.iloc[0].name
        if str(sha) != 'nan':
            example_shas.append(sha)
            example_uids.append(uid)
unique_example_uids = set(example_uids)
len(unique_example_uids)
embeddings_df = pd.read_csv('../input/CORD-19-research-challenge/cord_19_embeddings_4_24/cord_19_embeddings_4_24.csv', header=None, index_col=0)
available_uids = unique_example_uids.intersection(embeddings_df.index)
example_embeddings_df = embeddings_df.loc[available_uids]
feature_pop_means = embeddings_df.mean(0)
# `p_vals` is assumed from an earlier cell (see the sketch under 34147773/cell_42)
informative_embeddings_df = embeddings_df.loc[:, p_vals < 0.05 / len(p_vals)]
clustering = KMeans(n_clusters=10, random_state=0).fit(informative_embeddings_df.values)
labels = clustering.labels_
collections.Counter(labels)
code
128023859/cell_13
[ "text_plain_output_1.png" ]
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')

def create_ensemble_submission():
    # Borda-style rank voting: each classifier's probability ranking contributes
    # 0..10 points per class; the top three classes form the submission string.
    submission_prognosis_list = []
    for i in range(0, len(results_gbc)):
        votes = np.zeros(11)
        order = np.argsort(results_gbc[i])  # renamed from `sorted`, which shadowed the builtin
        for ii in range(0, 11):
            votes[order[ii]] += ii
        order = np.argsort(results_svc[i])
        for ii in range(0, 11):
            votes[order[ii]] += ii
        order = np.argsort(results_nnc[i])
        for ii in range(0, 11):
            votes[order[ii]] += ii
        response = classes[np.argmax(votes)]
        votes[np.argmax(votes)] = 0
        response += ' ' + classes[np.argmax(votes)]
        votes[np.argmax(votes)] = 0
        response += ' ' + classes[np.argmax(votes)]
        submission_prognosis_list += [response]
    submission_df = pd.DataFrame()
    submission_df['id'] = test_df['id']
    submission_df['prognosis'] = submission_prognosis_list
    return submission_df

train_df.columns
X = train_df.to_numpy()[:, 1:-1]
X.shape
y = train_df['prognosis']
classes = sorted(list(train_df['prognosis'].unique()))
X.shape
validate_records = 4
clf_gbc = GradientBoostingClassifier(n_estimators=2500, learning_rate=0.001, max_features=30, max_depth=3, random_state=0, subsample=0.6).fit(X[validate_records:], y[validate_records:])
clf_svc = make_pipeline(StandardScaler(), SVC(gamma='auto', kernel='poly', probability=True))
clf_svc.fit(X, y)
clf_nnc = MLPClassifier(random_state=0, hidden_layer_sizes=3, max_iter=1000).fit(X[validate_records:], y[validate_records:])
results_gbc = clf_gbc.predict_proba(test_df.to_numpy()[:, 1:])
results_svc = clf_svc.predict_proba(test_df.to_numpy()[:, 1:])
results_nnc = clf_nnc.predict_proba(test_df.to_numpy()[:, 1:])
submission_df = create_ensemble_submission()
submission_df
code
128023859/cell_9
[ "text_html_output_1.png" ]
from sklearn.ensemble import GradientBoostingClassifier
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')

def create_ensemble_submission():
    submission_prognosis_list = []
    for i in range(0, len(results_gbc)):
        votes = np.zeros(11)
        order = np.argsort(results_gbc[i])
        for ii in range(0, 11):
            votes[order[ii]] += ii
        order = np.argsort(results_svc[i])
        for ii in range(0, 11):
            votes[order[ii]] += ii
        order = np.argsort(results_nnc[i])
        for ii in range(0, 11):
            votes[order[ii]] += ii
        response = classes[np.argmax(votes)]
        votes[np.argmax(votes)] = 0
        response += ' ' + classes[np.argmax(votes)]
        votes[np.argmax(votes)] = 0
        response += ' ' + classes[np.argmax(votes)]
        submission_prognosis_list += [response]
    submission_df = pd.DataFrame()
    submission_df['id'] = test_df['id']
    submission_df['prognosis'] = submission_prognosis_list
    return submission_df

train_df.columns
X = train_df.to_numpy()[:, 1:-1]
X.shape
y = train_df['prognosis']
classes = sorted(list(train_df['prognosis'].unique()))
X.shape
validate_records = 4
clf_gbc = GradientBoostingClassifier(n_estimators=2500, learning_rate=0.001, max_features=30, max_depth=3, random_state=0, subsample=0.6).fit(X[validate_records:], y[validate_records:])
if validate_records > 0:
    print(clf_gbc.score(X[:validate_records], y[:validate_records]))
code
128023859/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
train_df.columns
code
128023859/cell_6
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
train_df.columns
X = train_df.to_numpy()[:, 1:-1]
X.shape
code
128023859/cell_11
[ "text_html_output_1.png" ]
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')

def create_ensemble_submission():
    submission_prognosis_list = []
    for i in range(0, len(results_gbc)):
        votes = np.zeros(11)
        order = np.argsort(results_gbc[i])
        for ii in range(0, 11):
            votes[order[ii]] += ii
        order = np.argsort(results_svc[i])
        for ii in range(0, 11):
            votes[order[ii]] += ii
        order = np.argsort(results_nnc[i])
        for ii in range(0, 11):
            votes[order[ii]] += ii
        response = classes[np.argmax(votes)]
        votes[np.argmax(votes)] = 0
        response += ' ' + classes[np.argmax(votes)]
        votes[np.argmax(votes)] = 0
        response += ' ' + classes[np.argmax(votes)]
        submission_prognosis_list += [response]
    submission_df = pd.DataFrame()
    submission_df['id'] = test_df['id']
    submission_df['prognosis'] = submission_prognosis_list
    return submission_df

train_df.columns
X = train_df.to_numpy()[:, 1:-1]
X.shape
y = train_df['prognosis']
classes = sorted(list(train_df['prognosis'].unique()))
X.shape
validate_records = 4
clf_gbc = GradientBoostingClassifier(n_estimators=2500, learning_rate=0.001, max_features=30, max_depth=3, random_state=0, subsample=0.6).fit(X[validate_records:], y[validate_records:])
clf_nnc = MLPClassifier(random_state=0, hidden_layer_sizes=3, max_iter=1000).fit(X[validate_records:], y[validate_records:])
if validate_records > 0:
    print(clf_nnc.score(X[:validate_records], y[:validate_records]))
code
128023859/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128023859/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
train_df.columns
X = train_df.to_numpy()[:, 1:-1]
X.shape
X.shape
code
128023859/cell_10
[ "text_plain_output_1.png" ]
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')

def create_ensemble_submission():
    submission_prognosis_list = []
    for i in range(0, len(results_gbc)):
        votes = np.zeros(11)
        order = np.argsort(results_gbc[i])
        for ii in range(0, 11):
            votes[order[ii]] += ii
        order = np.argsort(results_svc[i])
        for ii in range(0, 11):
            votes[order[ii]] += ii
        order = np.argsort(results_nnc[i])
        for ii in range(0, 11):
            votes[order[ii]] += ii
        response = classes[np.argmax(votes)]
        votes[np.argmax(votes)] = 0
        response += ' ' + classes[np.argmax(votes)]
        votes[np.argmax(votes)] = 0
        response += ' ' + classes[np.argmax(votes)]
        submission_prognosis_list += [response]
    submission_df = pd.DataFrame()
    submission_df['id'] = test_df['id']
    submission_df['prognosis'] = submission_prognosis_list
    return submission_df

train_df.columns
X = train_df.to_numpy()[:, 1:-1]
X.shape
y = train_df['prognosis']
classes = sorted(list(train_df['prognosis'].unique()))
X.shape
validate_records = 4
clf_gbc = GradientBoostingClassifier(n_estimators=2500, learning_rate=0.001, max_features=30, max_depth=3, random_state=0, subsample=0.6).fit(X[validate_records:], y[validate_records:])
clf_svc = make_pipeline(StandardScaler(), SVC(gamma='auto', kernel='poly', probability=True))
clf_svc.fit(X, y)
if validate_records > 0:
    print(clf_svc.score(X[:validate_records], y[:validate_records]))
code
128023859/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
test_df
code
128023859/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
train_df.columns
train_df
code
50227784/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_df = pd.read_csv('../input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
data_df.head()
code
50227784/cell_6
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm

plt.rcParams['figure.facecolor'] = 'white'
plt.rcParams['axes.facecolor'] = '#464646'
plt.rcParams['figure.figsize'] = (10, 7)
plt.rcParams['text.color'] = '#666666'
plt.rcParams['axes.labelcolor'] = '#666666'
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['axes.titlesize'] = 16
plt.rcParams['xtick.color'] = '#666666'
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.color'] = '#666666'
plt.rcParams['ytick.labelsize'] = 14
sns.color_palette('dark')
tqdm.pandas()  # enables Series.progress_apply
code
50227784/cell_26
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Embedding, Dropout, Flatten, Layer, Input
from keras.models import Sequential, Model
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer

# Xtrain/Xcv/Ytrain/Ycv and the custom Attention layer are assumed to come from
# earlier notebook cells that this record does not include (see the sketches
# appended to this cell and to 50227784/cell_24 below).
Ytrain = Ytrain.map({'positive': 1, 'negative': 0})
Ycv = Ycv.map({'positive': 1, 'negative': 0})
tokenizer = Tokenizer(num_words=20000, oov_token='<UNK>')
tokenizer.fit_on_texts(Xtrain)
word2num = tokenizer.word_index
num2word = {k: w for w, k in word2num.items()}
train_sequences = tokenizer.texts_to_sequences(Xtrain)
maxlen = max([len(x) for x in train_sequences])
train_padded = pad_sequences(train_sequences, padding='post', truncating='post', maxlen=100)
test_sequences = tokenizer.texts_to_sequences(Xcv)
test_padded = pad_sequences(test_sequences, padding='post', truncating='post', maxlen=100)
inp = Input(shape=(100,))
x = Embedding(20000, 256, trainable=False)(inp)
x = Bidirectional(LSTM(300, return_sequences=True, dropout=0.25, recurrent_dropout=0.25))(x)
x = Attention()(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.25)(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inp, outputs=x)
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
file_path = 'model.hdf5'
ckpt = ModelCheckpoint(file_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
early = EarlyStopping(monitor='val_loss', mode='min', patience=1)
model.fit(train_padded, Ytrain, batch_size=1024, epochs=30, validation_data=(test_padded, Ycv), callbacks=[ckpt])
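# A plausible reconstruction of the missing train/validation split, sketched as a
# comment (an assumption -- the split parameters are not part of this record;
# `clean_review` and `sentiment` are the columns built in 50227784/cell_15):
#
#   from sklearn.model_selection import train_test_split
#   Xtrain, Xcv, Ytrain, Ycv = train_test_split(
#       data_df['clean_review'], data_df['sentiment'], test_size=0.2, random_state=0)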
code
50227784/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50227784/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_df = pd.read_csv('../input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
data_df.shape
data_df['sentiment'].value_counts()
code
50227784/cell_15
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import word_tokenize
from tqdm import tqdm  # needed for Series.progress_apply below
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import string

tqdm.pandas()
data_df = pd.read_csv('../input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
data_df.shape
for i in range(10):
    idx = np.random.randint(1, 50001)

def remove_break(text):
    return re.sub('<br />', '', text)

def remove_punct(text):
    nopunct = ''
    for c in text:
        if c not in string.punctuation:
            nopunct = nopunct + c
    return nopunct

def remove_numbers(text):
    return re.sub('[0-9]', '', text)

def remove_links(text):
    return re.sub('http\\S+', '', text)

def remove_stop_words(word_list):
    stopwords_list = set(stopwords.words('english'))
    word_list = [word for word in word_list if word not in stopwords_list]
    return ' '.join(word_list)

def get_root(word_list):
    ps = PorterStemmer()
    return [ps.stem(word) for word in word_list]

def clean_text(text):
    text = remove_break(text)
    text = remove_links(text)
    text = remove_numbers(text)
    text = remove_punct(text)
    word_list = word_tokenize(text)
    word_list = get_root(word_list)
    return ' '.join(word_list)

data_df['clean_review'] = data_df['review'].progress_apply(clean_text)
code
50227784/cell_16
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_df = pd.read_csv('../input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
data_df.shape
for i in range(10):
    idx = np.random.randint(1, 50001)
data_df.head()
code
50227784/cell_3
[ "text_plain_output_1.png" ]
!pwd
code
50227784/cell_24
[ "text_plain_output_1.png" ]
from keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Embedding, Dropout, Flatten, Layer, Input
from keras.models import Sequential, Model

# `Attention` is a custom layer defined earlier in the original notebook; a
# sketch of one common formulation is appended below.
inp = Input(shape=(100,))
x = Embedding(20000, 256, trainable=False)(inp)
x = Bidirectional(LSTM(300, return_sequences=True, dropout=0.25, recurrent_dropout=0.25))(x)
x = Attention()(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.25)(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inp, outputs=x)
model.summary()
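# A minimal sketch of one common `Attention` formulation, given as a comment
# because the author's exact layer is not part of this record (an assumption):
#
#   from keras import backend as K
#   from keras.layers import Layer
#
#   class Attention(Layer):
#       def build(self, input_shape):
#           # one score weight per feature, one bias per timestep
#           self.w = self.add_weight(name='att_w', shape=(input_shape[-1], 1),
#                                    initializer='glorot_uniform', trainable=True)
#           self.b = self.add_weight(name='att_b', shape=(input_shape[1], 1),
#                                    initializer='zeros', trainable=True)
#           super().build(input_shape)
#
#       def call(self, x):
#           e = K.tanh(K.dot(x, self.w) + self.b)  # (batch, timesteps, 1) scores
#           a = K.softmax(e, axis=1)               # attention weights over time
#           return K.sum(x * a, axis=1)            # weighted sum -> (batch, features)
#
#       def compute_output_shape(self, input_shape):
#           return (input_shape[0], input_shape[-1])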
code
50227784/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_df = pd.read_csv('../input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
data_df.shape
code
50227784/cell_27
[ "text_html_output_1.png" ]
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Embedding, Dropout, Flatten, Layer, Input
from keras.models import Sequential, Model
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from sklearn import metrics
from sklearn.metrics import precision_recall_curve, auc, roc_auc_score, roc_curve, recall_score
import matplotlib.pyplot as plt  # needed for the ROC plot below
import seaborn as sns

# Xtrain/Xcv/Ytrain/Ycv and the custom Attention layer are assumed to come from
# earlier notebook cells (see the sketches under 50227784/cell_26 and cell_24).
Ytrain = Ytrain.map({'positive': 1, 'negative': 0})
Ycv = Ycv.map({'positive': 1, 'negative': 0})
tokenizer = Tokenizer(num_words=20000, oov_token='<UNK>')
tokenizer.fit_on_texts(Xtrain)
word2num = tokenizer.word_index
num2word = {k: w for w, k in word2num.items()}
train_sequences = tokenizer.texts_to_sequences(Xtrain)
maxlen = max([len(x) for x in train_sequences])
train_padded = pad_sequences(train_sequences, padding='post', truncating='post', maxlen=100)
test_sequences = tokenizer.texts_to_sequences(Xcv)
test_padded = pad_sequences(test_sequences, padding='post', truncating='post', maxlen=100)
inp = Input(shape=(100,))
x = Embedding(20000, 256, trainable=False)(inp)
x = Bidirectional(LSTM(300, return_sequences=True, dropout=0.25, recurrent_dropout=0.25))(x)
x = Attention()(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.25)(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inp, outputs=x)
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
file_path = 'model.hdf5'
ckpt = ModelCheckpoint(file_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
early = EarlyStopping(monitor='val_loss', mode='min', patience=1)
model.fit(train_padded, Ytrain, batch_size=1024, epochs=30, validation_data=(test_padded, Ycv), callbacks=[ckpt])
Ycv_pred0 = model.predict(test_padded)
Ycv_pred = (Ycv_pred0 > 0.5).astype('int64')
print('Accuracy :', metrics.accuracy_score(Ycv, Ycv_pred))
print('f1 score macro :', metrics.f1_score(Ycv, Ycv_pred, average='macro'))
print('f1 score micro :', metrics.f1_score(Ycv, Ycv_pred, average='micro'))
print('Hamming loss :', metrics.hamming_loss(Ycv, Ycv_pred))
fpr, tpr, thresh = roc_curve(Ycv, Ycv_pred0)
print('auc: ', auc(fpr, tpr))
print('Classification report: \n', metrics.classification_report(Ycv, Ycv_pred))
fig, ax = plt.subplots(figsize=[10, 7])
ax.set_title('Receiver Operating Characteristic training')
ax.plot(fpr, tpr, sns.xkcd_rgb['greenish cyan'])
ax.plot([0, 1], [0, 1], ls='--', c=sns.xkcd_rgb['red pink'])
ax.set_xlim([-0.01, 1.01])
ax.set_ylim([-0.01, 1.01])
ax.set_ylabel('True Positive Rate')
ax.set_xlabel('False Positive Rate')
code
50227784/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_df = pd.read_csv('../input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
data_df.shape
for i in range(10):
    idx = np.random.randint(1, 50001)
    print('Review {}:'.format(i + 1))
    print('\n\n')
    print(data_df.iloc[idx]['review'])
    print('\n')
    print('*' * 100)
    print('\n')
code
128042900/cell_13
[ "text_html_output_1.png" ]
from sklearn.decomposition import TruncatedSVD
import numpy as np
import pandas as pd

new_pr = pd.read_csv('/kaggle/input/collaborative-csv/data_collaborative_full_csv')
new_pr = new_pr.sample(50000)
sentiment_matrix = new_pr.pivot_table(values='total_score', index='asin', columns='reviewerID', fill_value=0)
sentiment_matrix
sentiment_matrix.shape
X = sentiment_matrix
SVD = TruncatedSVD(n_components=10)
decomposed_matrix = SVD.fit_transform(X)
decomposed_matrix.shape
'\nThe singular value decomposition(SVD) provides another way to factorize a matrix, into singular vectors and singular values. ... The SVD is used widely both in the calculation of other matrix operations, such as matrix inverse, but also as a data reduction method in machine learning\n'
decomposed_matrix.shape
correlation_matrix = np.corrcoef(decomposed_matrix)
correlation_matrix
i = '0486413012'
product_names = list(X.index)
product_ID = product_names.index(i)
product_ID
correlation_product_ID = correlation_matrix[product_ID]
correlation_product_ID.shape
code
128042900/cell_6
[ "text_html_output_1.png" ]
import pandas as pd

new_pr = pd.read_csv('/kaggle/input/collaborative-csv/data_collaborative_full_csv')
new_pr = new_pr.sample(50000)
sentiment_matrix = new_pr.pivot_table(values='total_score', index='asin', columns='reviewerID', fill_value=0)
sentiment_matrix
code
128042900/cell_11
[ "text_html_output_1.png" ]
from sklearn.decomposition import TruncatedSVD
import numpy as np
import pandas as pd

new_pr = pd.read_csv('/kaggle/input/collaborative-csv/data_collaborative_full_csv')
new_pr = new_pr.sample(50000)
sentiment_matrix = new_pr.pivot_table(values='total_score', index='asin', columns='reviewerID', fill_value=0)
sentiment_matrix
sentiment_matrix.shape
X = sentiment_matrix
SVD = TruncatedSVD(n_components=10)
decomposed_matrix = SVD.fit_transform(X)
decomposed_matrix.shape
'\nThe singular value decomposition(SVD) provides another way to factorize a matrix, into singular vectors and singular values. ... The SVD is used widely both in the calculation of other matrix operations, such as matrix inverse, but also as a data reduction method in machine learning\n'
decomposed_matrix.shape
correlation_matrix = np.corrcoef(decomposed_matrix)
correlation_matrix
code
128042900/cell_19
[ "text_html_output_1.png" ]
import pandas as pd

new_pr = pd.read_csv('/kaggle/input/collaborative-csv/data_collaborative_full_csv')
new_pr = new_pr.sample(50000)
data = pd.read_csv('/kaggle/input/data-work/data_work')
data.query("asin == '0486413012'")
data.query("asin == '0005000009'")
data.query("asin == '0005092663'")
data.query("asin == '0310396336'")
code
128042900/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128042900/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

new_pr = pd.read_csv('/kaggle/input/collaborative-csv/data_collaborative_full_csv')
new_pr = new_pr.sample(50000)
sentiment_matrix = new_pr.pivot_table(values='total_score', index='asin', columns='reviewerID', fill_value=0)
sentiment_matrix
sentiment_matrix.shape
code
128042900/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

new_pr = pd.read_csv('/kaggle/input/collaborative-csv/data_collaborative_full_csv')
new_pr = new_pr.sample(50000)
data = pd.read_csv('/kaggle/input/data-work/data_work')
data.query("asin == '0486413012'")
data.query("asin == '0005000009'")
data.query("asin == '0005092663'")
code
128042900/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

new_pr = pd.read_csv('/kaggle/input/collaborative-csv/data_collaborative_full_csv')
new_pr = new_pr.sample(50000)
sentiment_matrix = new_pr.pivot_table(values='total_score', index='asin', columns='reviewerID', fill_value=0)
sentiment_matrix
sentiment_matrix.shape
X = sentiment_matrix
X.head(20)
code
128042900/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

new_pr = pd.read_csv('/kaggle/input/collaborative-csv/data_collaborative_full_csv')
new_pr = new_pr.sample(50000)
sentiment_matrix = new_pr.pivot_table(values='total_score', index='asin', columns='reviewerID', fill_value=0)
sentiment_matrix
new_pr.query("asin == '0310396336'")
code
128042900/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

new_pr = pd.read_csv('/kaggle/input/collaborative-csv/data_collaborative_full_csv')
new_pr = new_pr.sample(50000)
data = pd.read_csv('/kaggle/input/data-work/data_work')
data.query("asin == '0486413012'")
code
128042900/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

new_pr = pd.read_csv('/kaggle/input/collaborative-csv/data_collaborative_full_csv')
new_pr = new_pr.sample(50000)
data = pd.read_csv('/kaggle/input/data-work/data_work')
data.query("asin == '0486413012'")
data.query("asin == '0005000009'")
code
128042900/cell_14
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.decomposition import TruncatedSVD
import numpy as np
import pandas as pd

new_pr = pd.read_csv('/kaggle/input/collaborative-csv/data_collaborative_full_csv')
new_pr = new_pr.sample(50000)
sentiment_matrix = new_pr.pivot_table(values='total_score', index='asin', columns='reviewerID', fill_value=0)
sentiment_matrix
sentiment_matrix.shape
X = sentiment_matrix
SVD = TruncatedSVD(n_components=10)
decomposed_matrix = SVD.fit_transform(X)
decomposed_matrix.shape
'\nThe singular value decomposition(SVD) provides another way to factorize a matrix, into singular vectors and singular values. ... The SVD is used widely both in the calculation of other matrix operations, such as matrix inverse, but also as a data reduction method in machine learning\n'
decomposed_matrix.shape
correlation_matrix = np.corrcoef(decomposed_matrix)
correlation_matrix
i = '0486413012'
product_names = list(X.index)
product_ID = product_names.index(i)
product_ID
correlation_product_ID = correlation_matrix[product_ID]
correlation_product_ID.shape
Recommend = list(X.index[correlation_product_ID > 0.65])
Recommend.remove(i)
Recommend[0:24]
code
128042900/cell_10
[ "text_html_output_1.png" ]
from sklearn.decomposition import TruncatedSVD
import pandas as pd

new_pr = pd.read_csv('/kaggle/input/collaborative-csv/data_collaborative_full_csv')
new_pr = new_pr.sample(50000)
sentiment_matrix = new_pr.pivot_table(values='total_score', index='asin', columns='reviewerID', fill_value=0)
sentiment_matrix
sentiment_matrix.shape
X = sentiment_matrix
SVD = TruncatedSVD(n_components=10)
decomposed_matrix = SVD.fit_transform(X)
decomposed_matrix.shape
'\nThe singular value decomposition(SVD) provides another way to factorize a matrix, into singular vectors and singular values. ... The SVD is used widely both in the calculation of other matrix operations, such as matrix inverse, but also as a data reduction method in machine learning\n'
decomposed_matrix.shape
code
128042900/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

new_pr = pd.read_csv('/kaggle/input/collaborative-csv/data_collaborative_full_csv')
new_pr = new_pr.sample(50000)
sentiment_matrix = new_pr.pivot_table(values='total_score', index='asin', columns='reviewerID', fill_value=0)
sentiment_matrix
sentiment_matrix.shape
X = sentiment_matrix
i = '0486413012'
product_names = list(X.index)
product_ID = product_names.index(i)
product_ID
code
128042900/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

new_pr = pd.read_csv('/kaggle/input/collaborative-csv/data_collaborative_full_csv')
new_pr = new_pr.sample(50000)
new_pr
code
1007980/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
from sklearn.decomposition import PCA
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from time import time
import numpy as np
import pandas as pd

start = time()
train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
trainData = train_data.drop(columns='label')
trainLabel = train_data[['label']]
header = trainData.columns
X_train, X_test, y_train, y_test = train_test_split(trainData, trainLabel, test_size=0.3, random_state=1)
norm = Normalizer().fit(X_train)
X_train = norm.transform(X_train)
X_test = norm.transform(X_test)
testData = norm.transform(test_data)
X_train = pd.DataFrame(X_train, columns=header)
X_test = pd.DataFrame(X_test, columns=header)
testData = pd.DataFrame(testData, columns=header)
y_train = y_train.to_numpy()  # DataFrame.as_matrix() was removed in pandas 1.0
y_test = y_test.to_numpy()
end = time()
start = time()
component = 30
pca = PCA(n_components=component).fit(X_train)
X_train = pca.transform(X_train)
X_test = pca.transform(X_test)
testData = pca.transform(testData)
X_train = pd.DataFrame(X_train)
X_test = pd.DataFrame(X_test)
testData = pd.DataFrame(testData)
neighbors = 5
CLF1 = KNeighborsClassifier(n_neighbors=neighbors).fit(X_train, np.ravel(y_train))
penalty_C = 10.0
CLF2 = SVC(C=penalty_C, gamma=0.1, kernel='rbf').fit(X_train, np.ravel(y_train))
max_depth = 15
CLF3 = AdaBoostClassifier(DecisionTreeClassifier(max_depth=max_depth), n_estimators=1000, learning_rate=1.0, algorithm='SAMME.R').fit(X_train, np.ravel(y_train))
end = time()
predLabel1 = CLF1.predict(testData)
predLabel2 = CLF2.predict(testData)
predLabel3 = CLF3.predict(testData)
submission = pd.DataFrame({'ImageId': np.arange(1, predLabel1.shape[0] + 1), 'Label': predLabel1})
submission.to_csv('submission1_KNN.csv', index=False)
submission = pd.DataFrame({'ImageId': np.arange(1, predLabel2.shape[0] + 1), 'Label': predLabel2})
submission.to_csv('submission2_SVM.csv', index=False)
submission = pd.DataFrame({'ImageId': np.arange(1, predLabel3.shape[0] + 1), 'Label': predLabel3})
submission.to_csv('submission3_ADA.csv', index=False)
code
1007980/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
from sklearn.decomposition import PCA
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from time import time
import numpy as np
import pandas as pd

start = time()
train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
trainData = train_data.drop(columns='label')
trainLabel = train_data[['label']]
header = trainData.columns
X_train, X_test, y_train, y_test = train_test_split(trainData, trainLabel, test_size=0.3, random_state=1)
norm = Normalizer().fit(X_train)
X_train = norm.transform(X_train)
X_test = norm.transform(X_test)
testData = norm.transform(test_data)
X_train = pd.DataFrame(X_train, columns=header)
X_test = pd.DataFrame(X_test, columns=header)
testData = pd.DataFrame(testData, columns=header)
y_train = y_train.to_numpy()  # DataFrame.as_matrix() was removed in pandas 1.0
y_test = y_test.to_numpy()
end = time()
start = time()
component = 30
pca = PCA(n_components=component).fit(X_train)
X_train = pca.transform(X_train)
X_test = pca.transform(X_test)
testData = pca.transform(testData)
X_train = pd.DataFrame(X_train)
X_test = pd.DataFrame(X_test)
testData = pd.DataFrame(testData)
print('PCA - component : {}'.format(component))
neighbors = 5
CLF1 = KNeighborsClassifier(n_neighbors=neighbors).fit(X_train, np.ravel(y_train))
print('CLF | KNN-neighbors : {}'.format(neighbors))
penalty_C = 10.0
CLF2 = SVC(C=penalty_C, gamma=0.1, kernel='rbf').fit(X_train, np.ravel(y_train))
print('CLF | SVM-penalty_C : {}'.format(penalty_C))
max_depth = 15
CLF3 = AdaBoostClassifier(DecisionTreeClassifier(max_depth=max_depth), n_estimators=1000, learning_rate=1.0, algorithm='SAMME.R').fit(X_train, np.ravel(y_train))
print('CLF | Adaboost-max_depth : {}'.format(max_depth))
print('\n---CLF1---')
print('ACC: %.4f' % CLF1.score(X_test, y_test))
print('\n---CLF2---')
print('ACC: %.4f' % CLF2.score(X_test, y_test))
print('\n---CLF3---')
print('ACC: %.4f' % CLF3.score(X_test, y_test))
end = time()
print('\n***Training Done: %.2f ***\n' % (end - start))
code
1007980/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
from time import time
from sklearn.preprocessing import Normalizer
from sklearn.decomposition import PCA
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
code
1007980/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
from sklearn.preprocessing import Normalizer
from time import time
import pandas as pd

start = time()
train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
trainData = train_data.drop(columns='label')
trainLabel = train_data[['label']]
header = trainData.columns
X_train, X_test, y_train, y_test = train_test_split(trainData, trainLabel, test_size=0.3, random_state=1)
norm = Normalizer().fit(X_train)
X_train = norm.transform(X_train)
X_test = norm.transform(X_test)
testData = norm.transform(test_data)
X_train = pd.DataFrame(X_train, columns=header)
X_test = pd.DataFrame(X_test, columns=header)
testData = pd.DataFrame(testData, columns=header)
y_train = y_train.to_numpy()  # DataFrame.as_matrix() was removed in pandas 1.0
y_test = y_test.to_numpy()
end = time()
print('\n***Loading Done: %.2f ***\n' % (end - start))
code
105199186/cell_9
[ "text_plain_output_1.png" ]
unig_dist = {'apple': 0.023, 'bee': 0.12, 'desk': 0.34, 'chair': 0.517}
sum(unig_dist.values())
code
105199186/cell_11
[ "text_plain_output_1.png" ]
unig_dist = {'apple': 0.023, 'bee': 0.12, 'desk': 0.34, 'chair': 0.517}
sum(unig_dist.values())
alpha = 3 / 4  # word2vec's unigram-smoothing exponent
noise_dist = {key: val ** alpha for key, val in unig_dist.items()}
Z = sum(noise_dist.values())  # normalizing constant
noise_dist_normalized = {key: val / Z for key, val in noise_dist.items()}
noise_dist_normalized
sum(noise_dist_normalized.values())
code
105199186/cell_15
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra

unig_dist = {'apple': 0.023, 'bee': 0.12, 'desk': 0.34, 'chair': 0.517}
sum(unig_dist.values())
alpha = 3 / 4
noise_dist = {key: val ** alpha for key, val in unig_dist.items()}
Z = sum(noise_dist.values())
noise_dist_normalized = {key: val / Z for key, val in noise_dist.items()}
noise_dist_normalized
sum(noise_dist_normalized.values())
K = 10
np.random.choice(list(noise_dist_normalized.keys()), size=K, p=list(noise_dist_normalized.values()))
code
105199186/cell_10
[ "image_output_1.png" ]
unig_dist = {'apple': 0.023, 'bee': 0.12, 'desk': 0.34, 'chair': 0.517}
sum(unig_dist.values())
alpha = 3 / 4
noise_dist = {key: val ** alpha for key, val in unig_dist.items()}
Z = sum(noise_dist.values())
noise_dist_normalized = {key: val / Z for key, val in noise_dist.items()}
noise_dist_normalized
code
105199186/cell_5
[ "text_plain_output_1.png" ]
from IPython.display import Image

Image('../input/noise-distpng/noise_dist.png')
code
74058017/cell_6
[ "text_plain_output_1.png" ]
from sklearn.feature_selection import mutual_info_classif
from sklearn.impute import SimpleImputer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb

train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv')
y_train = train['claim']
X_train = train.drop(columns='claim')
numerical_transformer = SimpleImputer(strategy='constant', fill_value=0)
imputed_X_train = pd.DataFrame(numerical_transformer.fit_transform(X_train))
imputed_X_train.columns = X_train.columns
cols_with_missing = [col for col in imputed_X_train.columns if imputed_X_train[col].isnull().any()]
print(cols_with_missing)
code
74058017/cell_7
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import mutual_info_classif
from sklearn.impute import SimpleImputer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb

train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv')
y_train = train['claim']
X_train = train.drop(columns='claim')
numerical_transformer = SimpleImputer(strategy='constant', fill_value=0)
imputed_X_train = pd.DataFrame(numerical_transformer.fit_transform(X_train))
imputed_X_train.columns = X_train.columns
cols_with_missing = [col for col in imputed_X_train.columns if imputed_X_train[col].isnull().any()]
model = RandomForestClassifier(n_estimators=10, max_samples=100000, n_jobs=4)
model.fit(imputed_X_train, y_train)
code
74052853/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col=0)
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col=0)
train_row, train_col = train.shape
test_row, test_col = test.shape
print(f'Number of rows in training dataset------------->{train_row}\nNumber of columns in training dataset---------->{train_col}\n')
print(f'Number of rows in testing dataset-------------->{test_row}\nNumber of columns in testing dataset----------->{test_col}')
code
74052853/cell_26
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col=0)
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col=0)
train_row, train_col = train.shape
test_row, test_col = test.shape
train.corr()
train.corrwith(train['claim'])
plot, ax = plt.subplots(figsize=(10, 8))
sns.heatmap(train.corr())
plot, ax = plt.subplots(figsize=(10, 8))
sns.countplot(train['claim'])
code
74052853/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col=0)
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col=0)
train.head()
code
74052853/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col=0)
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col=0)
train_row, train_col = train.shape
test_row, test_col = test.shape
train.corr()
train.corrwith(train['claim'])
code
74052853/cell_1
[ "text_plain_output_1.png" ]
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
74052853/cell_18
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col=0)
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col=0)
train_row, train_col = train.shape
test_row, test_col = test.shape
train.corr()
code
74052853/cell_32
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col=0)
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col=0)
train_row, train_col = train.shape
test_row, test_col = test.shape
train.corr()
train.corrwith(train['claim'])
plot, ax = plt.subplots(figsize=(10, 8))
sns.heatmap(train.corr())
hist = train.hist(bins=25, figsize=(70, 45))
features = train.columns.tolist()[0:-1]
target = ['claim']
train_column_missing = train.isnull().sum()
train_row_missing = train[features].isnull().sum(axis=1)
train['no_of_missing_data'] = train_row_missing
test_column_missing = test.isnull().sum()
test_row_missing = test[features].isnull().sum(axis=1)
test['no_of_missing_data'] = test_row_missing
# Assumed intent: the totals below were printed but never computed in this record.
train_total_missing = train_column_missing.sum()
test_total_missing = test_column_missing.sum()
print(f'Total number of missing values in training dataset---->{train_total_missing}')
print(f'Total number of missing values in testing dataset----->{test_total_missing}')
train_no_of_missing_rows = (train['no_of_missing_data'] != 0).sum()
print('\n{0:{fill}{align}80}\n'.format('Training Data', fill='=', align='^'))
print(f"Total rows -----------------------> {train_row}\nNumber of rows with missing data--> {train_no_of_missing_rows}\n{'-' * 50}\nNumber of rows with full data-----> {train_row - train_no_of_missing_rows}")
test_no_of_missing_rows = (test['no_of_missing_data'] != 0).sum()
print('\n{0:{fill}{align}80}\n'.format('Testing Data', fill='=', align='^'))
print(f"Total rows -----------------------> {test_row}\nNumber of rows with missing data--> {test_no_of_missing_rows}\n{'-' * 50}\nNumber of rows with full data-----> {test_row - test_no_of_missing_rows}")
code
74052853/cell_17
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col=0)
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col=0)
train_row, train_col = train.shape
test_row, test_col = test.shape
train.describe()
code
74052853/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col=0)
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col=0)
train_row, train_col = train.shape
test_row, test_col = test.shape
train.corr()
train.corrwith(train['claim'])
plot, ax = plt.subplots(figsize=(10, 8))
sns.heatmap(train.corr())
code
74052853/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col=0)
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col=0)
train_row, train_col = train.shape
test_row, test_col = test.shape
print(train.info())
print('=' * 50)
test.info()
code
74052853/cell_22
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col=0)
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col=0)
train_row, train_col = train.shape
test_row, test_col = test.shape
test.describe()
code
74052853/cell_27
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col=0)
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col=0)
train_row, train_col = train.shape
test_row, test_col = test.shape
train.corr()
train.corrwith(train['claim'])
plot, ax = plt.subplots(figsize=(10, 8))
sns.heatmap(train.corr())
hist = train.hist(bins=25, figsize=(70, 45))
code
74052853/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col=0)
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col=0)
test.head()
code
128004723/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings

warnings.filterwarnings('ignore', category=UserWarning)
warnings.filterwarnings('ignore', category=pd.errors.PerformanceWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
train_df = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
submission_df = pd.read_csv('/kaggle/input/playground-series-s3e13/sample_submission.csv')
train_df = train_df.set_index('id')
test_df = test_df.set_index('id')
test_df.head(3)
code
128004723/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings

warnings.filterwarnings('ignore', category=UserWarning)
warnings.filterwarnings('ignore', category=pd.errors.PerformanceWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
train_df = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
submission_df = pd.read_csv('/kaggle/input/playground-series-s3e13/sample_submission.csv')
train_df = train_df.set_index('id')
test_df = test_df.set_index('id')

def convert_to_string(row):
    row_dict = row.to_dict()
    base = 'a person with the symptoms '
    for symptom, value in row_dict.items():
        if symptom == 'prognosis':
            continue
        elif value == 1:
            base += symptom.replace('_', ' ') + ', '
    base = base.rstrip(', ')
    return base

train_df['text_symptom'] = train_df.apply(convert_to_string, axis=1)
test_df['text_symptom'] = test_df.apply(convert_to_string, axis=1)
train_df['prognosis'].head(5).values
code
128004723/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128004723/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings

warnings.filterwarnings('ignore', category=UserWarning)
warnings.filterwarnings('ignore', category=pd.errors.PerformanceWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
train_df = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
submission_df = pd.read_csv('/kaggle/input/playground-series-s3e13/sample_submission.csv')
train_df = train_df.set_index('id')
test_df = test_df.set_index('id')

def convert_to_string(row):
    row_dict = row.to_dict()
    base = 'a person with the symptoms '
    for symptom, value in row_dict.items():
        if symptom == 'prognosis':
            continue
        elif value == 1:
            base += symptom.replace('_', ' ') + ', '
    base = base.rstrip(', ')
    return base

train_df['text_symptom'] = train_df.apply(convert_to_string, axis=1)
test_df['text_symptom'] = test_df.apply(convert_to_string, axis=1)
train_df['text_symptom'].head(5).values
code
128004723/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings

warnings.filterwarnings('ignore', category=UserWarning)
warnings.filterwarnings('ignore', category=pd.errors.PerformanceWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
train_df = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
submission_df = pd.read_csv('/kaggle/input/playground-series-s3e13/sample_submission.csv')
train_df = train_df.set_index('id')
test_df = test_df.set_index('id')
train_df.head(3)
code
88085166/cell_21
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
percent_null = []
n_col = df.columns
for col in n_col:
    percent_null.append(df[col].isnull().sum() / len(df[col]) * 100)
df_missing = pd.DataFrame(percent_null, index=df.columns, columns=['percent_missing'])
df_missing = df_missing.reset_index().rename({'index': 'column_name'}, axis=1)
df_missing = df_missing[df_missing['percent_missing'] > 0].sort_values(by='percent_missing', ascending=False)
clr = ['steelblue' if percent > 10 else 'skyblue' for percent in df_missing['percent_missing']]

def make_distplot(data, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.set_title(title)
    sns.distplot(data, bins=30)

def horizontal_bar(data, x, y, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    sns.barplot(data=data, x=x, y=y, color='steelblue')
    ax.set_xlabel(x)
    ax.set_ylabel(y)
    ax.set_title(title)

def mil_to_thousand(data):
    # Normalise money strings to thousands: 'M'-suffixed values are scaled by 1000.
    regex = '[-+]?(\\d*\\.?\\d+)'
    data = str(data)
    size = len(data)
    if data[size - 1] == 'M':
        data = re.findall(regex, data)
        data = np.asarray(data).astype('float64')
        data = data * 1000
    else:
        data = re.findall(regex, data)
        data = np.asarray(data).astype('float64')
    return data

df['Release Clause'] = df['Release Clause'].apply(lambda x: mil_to_thousand(x))
df = df.explode('Release Clause')
df['Release Clause'] = df['Release Clause'].astype('float64')  # cast the exploded object column to numeric
players_rc = df[['Name', 'Release Clause']].sort_values(by='Release Clause', ascending=False)[:20]
horizontal_bar(players_rc, 'Release Clause', 'Name', 'Players with Highest Release Clause (in thousand euros)')
df['Value'] = df['Value'].apply(lambda x: mil_to_thousand(x))
df = df.explode('Value')
df['Value'] = df['Value'].astype('float')
pos_value = df.groupby('Position')['Value'].sum().sort_values(ascending=False).reset_index()[:20]
horizontal_bar(pos_value, 'Value', 'Position', 'Total Value in Every Position (in thousand euros)')
pos_value = df.groupby('Position')['Value'].median().sort_values(ascending=False).reset_index()[:20]
horizontal_bar(pos_value, 'Value', 'Position', 'Median Value in Every Position (in thousand euros)')
top20_nationality = df.groupby('Nationality')['ID'].count().sort_values(ascending=False).reset_index()[:20]
top20_nationality = top20_nationality.rename({'ID': 'num_of_player'}, axis=1)
horizontal_bar(top20_nationality, 'num_of_player', 'Nationality', 'Top 20 Players Nationality')
nationality_ovr_med = df.groupby('Nationality')['Overall'].median()
nationality_num_of_players = df.groupby('Nationality')['ID'].count()
nationality_df = pd.DataFrame({'num of player': nationality_num_of_players, 'mean overall': nationality_ovr_med})
nationality_df = nationality_df[nationality_df['num of player'] > 100]
nationality_df = nationality_df.sort_values(by='mean overall', ascending=False)
nationality_df = nationality_df.reset_index()[:20]
horizontal_bar(nationality_df, 'mean overall', 'Nationality', 'Nationality with Highest Median Players Overall Ratings')
league_ovr = df.groupby('Club')['Overall'].median().sort_values(ascending=False).reset_index()[:20]
horizontal_bar(league_ovr, 'Overall', 'Club', 'Clubs with Highest Median Overall Ratings')
most_valuable_club = df.groupby('Club')['Value'].sum().sort_values(ascending=False).reset_index()[:20]
horizontal_bar(most_valuable_club, 'Value', 'Club', 'Total Value of Players in Every Club (in thousand euros)')
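# Illustrative behaviour of mil_to_thousand, assuming FIFA-style money strings
# such as '€110.5M' or '€565K' (both hypothetical examples):
#   mil_to_thousand('€110.5M')  # -> array([110500.])  (millions rescaled to thousands)
#   mil_to_thousand('€565K')    # -> array([565.])     (already in thousands)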
code
88085166/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
percent_null = []
n_col = df.columns
for col in n_col:
    percent_null.append(df[col].isnull().sum() / len(df[col]) * 100)
df_missing = pd.DataFrame(percent_null, index=df.columns, columns=['percent_missing'])
df_missing = df_missing.reset_index().rename({'index': 'column_name'}, axis=1)
df_missing = df_missing[df_missing['percent_missing'] > 0].sort_values(by='percent_missing', ascending=False)
clr = ['steelblue' if percent > 10 else 'skyblue' for percent in df_missing['percent_missing']]

def make_distplot(data, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.set_title(title)
    sns.distplot(data, bins=30)

make_distplot(df['Overall'], 'Player Overall Distribution')
code
88085166/cell_4
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/fifa19/data.csv')
df.describe()
code
88085166/cell_20
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
percent_null = []
n_col = df.columns
for col in n_col:
    percent_null.append(df[col].isnull().sum() / len(df[col]) * 100)
df_missing = pd.DataFrame(percent_null, index=df.columns, columns=['percent_missing'])
df_missing = df_missing.reset_index().rename({'index': 'column_name'}, axis=1)
df_missing = df_missing[df_missing['percent_missing'] > 0].sort_values(by='percent_missing', ascending=False)
clr = ['steelblue' if percent > 10 else 'skyblue' for percent in df_missing['percent_missing']]

def make_distplot(data, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.set_title(title)
    sns.distplot(data, bins=30)

def horizontal_bar(data, x, y, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    sns.barplot(data=data, x=x, y=y, color='steelblue')
    ax.set_xlabel(x)
    ax.set_ylabel(y)
    ax.set_title(title)

def mil_to_thousand(data):
    # Normalise money strings to thousands: 'M'-suffixed values are scaled by 1000.
    regex = '[-+]?(\\d*\\.?\\d+)'
    data = str(data)
    size = len(data)
    if data[size - 1] == 'M':
        data = re.findall(regex, data)
        data = np.asarray(data).astype('float64')
        data = data * 1000
    else:
        data = re.findall(regex, data)
        data = np.asarray(data).astype('float64')
    return data

df['Release Clause'] = df['Release Clause'].apply(lambda x: mil_to_thousand(x))
df = df.explode('Release Clause')
df['Release Clause'] = df['Release Clause'].astype('float64')  # cast the exploded object column to numeric
players_rc = df[['Name', 'Release Clause']].sort_values(by='Release Clause', ascending=False)[:20]
horizontal_bar(players_rc, 'Release Clause', 'Name', 'Players with Highest Release Clause (in thousand euros)')
df['Value'] = df['Value'].apply(lambda x: mil_to_thousand(x))
df = df.explode('Value')
df['Value'] = df['Value'].astype('float')
pos_value = df.groupby('Position')['Value'].sum().sort_values(ascending=False).reset_index()[:20]
horizontal_bar(pos_value, 'Value', 'Position', 'Total Value in Every Position (in thousand euros)')
pos_value = df.groupby('Position')['Value'].median().sort_values(ascending=False).reset_index()[:20]
horizontal_bar(pos_value, 'Value', 'Position', 'Median Value in Every Position (in thousand euros)')
top20_nationality = df.groupby('Nationality')['ID'].count().sort_values(ascending=False).reset_index()[:20]
top20_nationality = top20_nationality.rename({'ID': 'num_of_player'}, axis=1)
horizontal_bar(top20_nationality, 'num_of_player', 'Nationality', 'Top 20 Players Nationality')
nationality_ovr_med = df.groupby('Nationality')['Overall'].median()
nationality_num_of_players = df.groupby('Nationality')['ID'].count()
nationality_df = pd.DataFrame({'num of player': nationality_num_of_players, 'mean overall': nationality_ovr_med})
nationality_df = nationality_df[nationality_df['num of player'] > 100]
nationality_df = nationality_df.sort_values(by='mean overall', ascending=False)
nationality_df = nationality_df.reset_index()[:20]
horizontal_bar(nationality_df, 'mean overall', 'Nationality', 'Nationality with Highest Median Players Overall Ratings')
league_ovr = df.groupby('Club')['Overall'].median().sort_values(ascending=False).reset_index()[:20]
horizontal_bar(league_ovr, 'Overall', 'Club', 'Clubs with Highest Median Overall Ratings')
code
88085166/cell_2
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/fifa19/data.csv')
df.head()
code
88085166/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
percent_null = []
n_col = df.columns
for col in n_col:
    percent_null.append(df[col].isnull().sum() / len(df[col]) * 100)
df_missing = pd.DataFrame(percent_null, index=df.columns, columns=['percent_missing'])
df_missing = df_missing.reset_index().rename({'index': 'column_name'}, axis=1)
df_missing = df_missing[df_missing['percent_missing'] > 0].sort_values(by='percent_missing', ascending=False)
clr = ['steelblue' if percent > 10 else 'skyblue' for percent in df_missing['percent_missing']]

def make_distplot(data, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.set_title(title)
    sns.distplot(data, bins=30)

def horizontal_bar(data, x, y, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    sns.barplot(data=data, x=x, y=y, color='steelblue')
    ax.set_xlabel(x)
    ax.set_ylabel(y)
    ax.set_title(title)

high_player_ovr = df[['Name', 'Overall']].sort_values(by='Overall', ascending=False).reset_index(drop=True)[:20]
horizontal_bar(high_player_ovr, 'Overall', 'Name', 'Players With Highest Overall Ratings')
code
88085166/cell_19
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
percent_null = []
n_col = df.columns
for col in n_col:
    percent_null.append(df[col].isnull().sum() / len(df[col]) * 100)
df_missing = pd.DataFrame(percent_null, index=df.columns, columns=['percent_missing'])
df_missing = df_missing.reset_index().rename({'index': 'column_name'}, axis=1)
df_missing = df_missing[df_missing['percent_missing'] > 0].sort_values(by='percent_missing', ascending=False)
clr = ['steelblue' if percent > 10 else 'skyblue' for percent in df_missing['percent_missing']]

def make_distplot(data, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.set_title(title)
    sns.distplot(data, bins=30)

def horizontal_bar(data, x, y, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    sns.barplot(data=data, x=x, y=y, color='steelblue')
    ax.set_xlabel(x)
    ax.set_ylabel(y)
    ax.set_title(title)

def mil_to_thousand(data):
    # Normalise money strings to thousands: 'M'-suffixed values are scaled by 1000.
    regex = '[-+]?(\\d*\\.?\\d+)'
    data = str(data)
    size = len(data)
    if data[size - 1] == 'M':
        data = re.findall(regex, data)
        data = np.asarray(data).astype('float64')
        data = data * 1000
    else:
        data = re.findall(regex, data)
        data = np.asarray(data).astype('float64')
    return data

df['Release Clause'] = df['Release Clause'].apply(lambda x: mil_to_thousand(x))
df = df.explode('Release Clause')
df['Release Clause'] = df['Release Clause'].astype('float64')  # cast the exploded object column to numeric
players_rc = df[['Name', 'Release Clause']].sort_values(by='Release Clause', ascending=False)[:20]
horizontal_bar(players_rc, 'Release Clause', 'Name', 'Players with Highest Release Clause (in thousand euros)')
df['Value'] = df['Value'].apply(lambda x: mil_to_thousand(x))
df = df.explode('Value')
df['Value'] = df['Value'].astype('float')
pos_value = df.groupby('Position')['Value'].sum().sort_values(ascending=False).reset_index()[:20]
horizontal_bar(pos_value, 'Value', 'Position', 'Total Value in Every Position (in thousand euros)')
pos_value = df.groupby('Position')['Value'].median().sort_values(ascending=False).reset_index()[:20]
horizontal_bar(pos_value, 'Value', 'Position', 'Median Value in Every Position (in thousand euros)')
top20_nationality = df.groupby('Nationality')['ID'].count().sort_values(ascending=False).reset_index()[:20]
top20_nationality = top20_nationality.rename({'ID': 'num_of_player'}, axis=1)
horizontal_bar(top20_nationality, 'num_of_player', 'Nationality', 'Top 20 Players Nationality')
nationality_ovr_med = df.groupby('Nationality')['Overall'].median()
nationality_num_of_players = df.groupby('Nationality')['ID'].count()
nationality_df = pd.DataFrame({'num of player': nationality_num_of_players, 'mean overall': nationality_ovr_med})
nationality_df = nationality_df[nationality_df['num of player'] > 100]
nationality_df = nationality_df.sort_values(by='mean overall', ascending=False)
nationality_df = nationality_df.reset_index()[:20]
horizontal_bar(nationality_df, 'mean overall', 'Nationality', 'Nationality with Highest Median Players Overall Ratings')
code
88085166/cell_18
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
percent_null = []
n_col = df.columns
for col in n_col:
    percent_null.append(df[col].isnull().sum() / len(df[col]) * 100)
df_missing = pd.DataFrame(percent_null, index=df.columns, columns=['percent_missing'])
df_missing = df_missing.reset_index().rename({'index': 'column_name'}, axis=1)
df_missing = df_missing[df_missing['percent_missing'] > 0].sort_values(by='percent_missing', ascending=False)
clr = ['steelblue' if percent > 10 else 'skyblue' for percent in df_missing['percent_missing']]

def make_distplot(data, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.set_title(title)
    sns.distplot(data, bins=30)

def horizontal_bar(data, x, y, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    sns.barplot(data=data, x=x, y=y, color='steelblue')
    ax.set_xlabel(x)
    ax.set_ylabel(y)
    ax.set_title(title)

def mil_to_thousand(data):
    # Normalise money strings to thousands: 'M'-suffixed values are scaled by 1000.
    regex = '[-+]?(\\d*\\.?\\d+)'
    data = str(data)
    size = len(data)
    if data[size - 1] == 'M':
        data = re.findall(regex, data)
        data = np.asarray(data).astype('float64')
        data = data * 1000
    else:
        data = re.findall(regex, data)
        data = np.asarray(data).astype('float64')
    return data

df['Release Clause'] = df['Release Clause'].apply(lambda x: mil_to_thousand(x))
df = df.explode('Release Clause')
df['Release Clause'] = df['Release Clause'].astype('float64')  # cast the exploded object column to numeric
players_rc = df[['Name', 'Release Clause']].sort_values(by='Release Clause', ascending=False)[:20]
horizontal_bar(players_rc, 'Release Clause', 'Name', 'Players with Highest Release Clause (in thousand euros)')
df['Value'] = df['Value'].apply(lambda x: mil_to_thousand(x))
df = df.explode('Value')
df['Value'] = df['Value'].astype('float')
pos_value = df.groupby('Position')['Value'].sum().sort_values(ascending=False).reset_index()[:20]
horizontal_bar(pos_value, 'Value', 'Position', 'Total Value in Every Position (in thousand euros)')
pos_value = df.groupby('Position')['Value'].median().sort_values(ascending=False).reset_index()[:20]
horizontal_bar(pos_value, 'Value', 'Position', 'Median Value in Every Position (in thousand euros)')
top20_nationality = df.groupby('Nationality')['ID'].count().sort_values(ascending=False).reset_index()[:20]
top20_nationality = top20_nationality.rename({'ID': 'num_of_player'}, axis=1)
horizontal_bar(top20_nationality, 'num_of_player', 'Nationality', 'Top 20 Players Nationality')
code
88085166/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
percent_null = []
n_col = df.columns
for col in n_col:
    percent_null.append(df[col].isnull().sum() / len(df[col]) * 100)
df_missing = pd.DataFrame(percent_null, index=df.columns, columns=['percent_missing'])
df_missing = df_missing.reset_index().rename({'index': 'column_name'}, axis=1)
df_missing = df_missing[df_missing['percent_missing'] > 0].sort_values(by='percent_missing', ascending=False)
clr = ['steelblue' if percent > 10 else 'skyblue' for percent in df_missing['percent_missing']]

def make_distplot(data, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.set_title(title)
    sns.distplot(data, bins=30)

make_distplot(df['Age'], 'Player Age Distribution')
code
88085166/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
percent_null = []
n_col = df.columns
for col in n_col:
    percent_null.append(df[col].isnull().sum() / len(df[col]) * 100)
df_missing = pd.DataFrame(percent_null, index=df.columns, columns=['percent_missing'])
df_missing = df_missing.reset_index().rename({'index': 'column_name'}, axis=1)
df_missing = df_missing[df_missing['percent_missing'] > 0].sort_values(by='percent_missing', ascending=False)
clr = ['steelblue' if percent > 10 else 'skyblue' for percent in df_missing['percent_missing']]

def make_distplot(data, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.set_title(title)
    sns.distplot(data, bins=30)

def horizontal_bar(data, x, y, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    sns.barplot(data=data, x=x, y=y, color='steelblue')
    ax.set_xlabel(x)
    ax.set_ylabel(y)
    ax.set_title(title)

def mil_to_thousand(data):
    # Normalise money strings to thousands: 'M'-suffixed values are scaled by 1000.
    regex = '[-+]?(\\d*\\.?\\d+)'
    data = str(data)
    size = len(data)
    if data[size - 1] == 'M':
        data = re.findall(regex, data)
        data = np.asarray(data).astype('float64')
        data = data * 1000
    else:
        data = re.findall(regex, data)
        data = np.asarray(data).astype('float64')
    return data

df['Release Clause'] = df['Release Clause'].apply(lambda x: mil_to_thousand(x))
df = df.explode('Release Clause')
df['Release Clause'] = df['Release Clause'].astype('float64')  # cast the exploded object column to numeric
players_rc = df[['Name', 'Release Clause']].sort_values(by='Release Clause', ascending=False)[:20]
horizontal_bar(players_rc, 'Release Clause', 'Name', 'Players with Highest Release Clause (in thousand euros)')
df['Value'] = df['Value'].apply(lambda x: mil_to_thousand(x))
df = df.explode('Value')
df['Value'] = df['Value'].astype('float')
pos_value = df.groupby('Position')['Value'].sum().sort_values(ascending=False).reset_index()[:20]
horizontal_bar(pos_value, 'Value', 'Position', 'Total Value in Every Position (in thousand euros)')
code
88085166/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
percent_null = []
n_col = df.columns
for col in n_col:
    percent_null.append(df[col].isnull().sum() / len(df[col]) * 100)
df_missing = pd.DataFrame(percent_null, index=df.columns, columns=['percent_missing'])
df_missing = df_missing.reset_index().rename({'index': 'column_name'}, axis=1)
df_missing = df_missing[df_missing['percent_missing'] > 0].sort_values(by='percent_missing', ascending=False)
clr = ['steelblue' if percent > 10 else 'skyblue' for percent in df_missing['percent_missing']]

def make_distplot(data, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.set_title(title)
    sns.distplot(data, bins=30)

def horizontal_bar(data, x, y, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    sns.barplot(data=data, x=x, y=y, color='steelblue')
    ax.set_xlabel(x)
    ax.set_ylabel(y)
    ax.set_title(title)

def mil_to_thousand(data):
    # Normalise money strings to thousands: 'M'-suffixed values are scaled by 1000.
    regex = '[-+]?(\\d*\\.?\\d+)'
    data = str(data)
    size = len(data)
    if data[size - 1] == 'M':
        data = re.findall(regex, data)
        data = np.asarray(data).astype('float64')
        data = data * 1000
    else:
        data = re.findall(regex, data)
        data = np.asarray(data).astype('float64')
    return data

df['Release Clause'] = df['Release Clause'].apply(lambda x: mil_to_thousand(x))
df = df.explode('Release Clause')
df['Release Clause'] = df['Release Clause'].astype('float64')  # cast the exploded object column to numeric
players_rc = df[['Name', 'Release Clause']].sort_values(by='Release Clause', ascending=False)[:20]
horizontal_bar(players_rc, 'Release Clause', 'Name', 'Players with Highest Release Clause (in thousand euros)')
df['Value'] = df['Value'].apply(lambda x: mil_to_thousand(x))
df = df.explode('Value')
df['Value'] = df['Value'].astype('float')
pos_value = df.groupby('Position')['Value'].sum().sort_values(ascending=False).reset_index()[:20]
horizontal_bar(pos_value, 'Value', 'Position', 'Total Value in Every Position (in thousand euros)')
pos_value = df.groupby('Position')['Value'].median().sort_values(ascending=False).reset_index()[:20]
horizontal_bar(pos_value, 'Value', 'Position', 'Median Value in Every Position (in thousand euros)')
code
88085166/cell_3
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/fifa19/data.csv')
df.info()
code
88085166/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
percent_null = []
n_col = df.columns
for col in n_col:
    percent_null.append(df[col].isnull().sum() / len(df[col]) * 100)
df_missing = pd.DataFrame(percent_null, index=df.columns, columns=['percent_missing'])
df_missing = df_missing.reset_index().rename({'index': 'column_name'}, axis=1)
df_missing = df_missing[df_missing['percent_missing'] > 0].sort_values(by='percent_missing', ascending=False)
clr = ['steelblue' if percent > 10 else 'skyblue' for percent in df_missing['percent_missing']]

def make_distplot(data, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.set_title(title)
    sns.distplot(data, bins=30)

def horizontal_bar(data, x, y, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    sns.barplot(data=data, x=x, y=y, color='steelblue')
    ax.set_xlabel(x)
    ax.set_ylabel(y)
    ax.set_title(title)

def mil_to_thousand(data):
    # Normalise money strings to thousands: 'M'-suffixed values are scaled by 1000.
    regex = '[-+]?(\\d*\\.?\\d+)'
    data = str(data)
    size = len(data)
    if data[size - 1] == 'M':
        data = re.findall(regex, data)
        data = np.asarray(data).astype('float64')
        data = data * 1000
    else:
        data = re.findall(regex, data)
        data = np.asarray(data).astype('float64')
    return data

df['Release Clause'] = df['Release Clause'].apply(lambda x: mil_to_thousand(x))
df = df.explode('Release Clause')
df['Release Clause'] = df['Release Clause'].astype('float64')  # cast the exploded object column to numeric
players_rc = df[['Name', 'Release Clause']].sort_values(by='Release Clause', ascending=False)[:20]
horizontal_bar(players_rc, 'Release Clause', 'Name', 'Players with Highest Release Clause (in thousand euros)')
df['Value'] = df['Value'].apply(lambda x: mil_to_thousand(x))
df = df.explode('Value')
df['Value'] = df['Value'].astype('float')
pos_value = df.groupby('Position')['Value'].sum().sort_values(ascending=False).reset_index()[:20]
horizontal_bar(pos_value, 'Value', 'Position', 'Total Value in Every Position (in thousand euros)')
pos_value = df.groupby('Position')['Value'].median().sort_values(ascending=False).reset_index()[:20]
horizontal_bar(pos_value, 'Value', 'Position', 'Median Value in Every Position (in thousand euros)')
high_young_player_ovr = df[df['Age'] <= 20][['Name', 'Overall']].sort_values(by='Overall', ascending=False).reset_index(drop=True)[:20]
horizontal_bar(high_young_player_ovr, 'Overall', 'Name', 'Players under 20 Years Old with Highest Overall Ratings')
code
88085166/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
percent_null = []
n_col = df.columns
for col in n_col:
    percent_null.append(df[col].isnull().sum() / len(df[col]) * 100)
df_missing = pd.DataFrame(percent_null, index=df.columns, columns=['percent_missing'])
df_missing = df_missing.reset_index().rename({'index': 'column_name'}, axis=1)
df_missing = df_missing[df_missing['percent_missing'] > 0].sort_values(by='percent_missing', ascending=False)
clr = ['steelblue' if percent > 10 else 'skyblue' for percent in df_missing['percent_missing']]

def make_distplot(data, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.set_title(title)
    sns.distplot(data, bins=30)

def horizontal_bar(data, x, y, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    sns.barplot(data=data, x=x, y=y, color='steelblue')
    ax.set_xlabel(x)
    ax.set_ylabel(y)
    ax.set_title(title)

def mil_to_thousand(data):
    # Normalise money strings to thousands: 'M'-suffixed values are scaled by 1000.
    regex = '[-+]?(\\d*\\.?\\d+)'
    data = str(data)
    size = len(data)
    if data[size - 1] == 'M':
        data = re.findall(regex, data)
        data = np.asarray(data).astype('float64')
        data = data * 1000
    else:
        data = re.findall(regex, data)
        data = np.asarray(data).astype('float64')
    return data

df['Release Clause'] = df['Release Clause'].apply(lambda x: mil_to_thousand(x))
df = df.explode('Release Clause')
df['Release Clause'] = df['Release Clause'].astype('float64')  # cast the exploded object column to numeric
players_rc = df[['Name', 'Release Clause']].sort_values(by='Release Clause', ascending=False)[:20]
horizontal_bar(players_rc, 'Release Clause', 'Name', 'Players with Highest Release Clause (in thousand euros)')
code
88085166/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
percent_null = []
n_col = df.columns
for col in n_col:
    percent_null.append(df[col].isnull().sum() / len(df[col]) * 100)
df_missing = pd.DataFrame(percent_null, index=df.columns, columns=['percent_missing'])
df_missing = df_missing.reset_index().rename({'index': 'column_name'}, axis=1)
df_missing = df_missing[df_missing['percent_missing'] > 0].sort_values(by='percent_missing', ascending=False)
clr = ['steelblue' if percent > 10 else 'skyblue' for percent in df_missing['percent_missing']]

def make_distplot(data, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.set_title(title)
    sns.distplot(data, bins=30)

def horizontal_bar(data, x, y, title):
    fig, ax = plt.subplots(figsize=(10, 5))
    sns.barplot(data=data, x=x, y=y, color='steelblue')
    ax.set_xlabel(x)
    ax.set_ylabel(y)
    ax.set_title(title)

df['Wage'] = df['Wage'].str.extract('(\\d+)')
df['Wage'] = df['Wage'].astype('float64')
highest_wage = df[['Name', 'Wage']].sort_values(by='Wage', ascending=False)[:20]
horizontal_bar(highest_wage, 'Wage', 'Name', 'Players with Highest Wage')
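# Note: str.extract('(\\d+)') keeps only the first run of digits, so a wage string
# such as '€565K' (hypothetical example) becomes 565.0 after the float cast; the
# 'K' suffix is dropped, i.e. wages effectively stay in thousands of euros.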
code
88085166/cell_5
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fifa19/data.csv')
percent_null = []
n_col = df.columns
for col in n_col:
    percent_null.append(df[col].isnull().sum() / len(df[col]) * 100)
df_missing = pd.DataFrame(percent_null, index=df.columns, columns=['percent_missing'])
df_missing = df_missing.reset_index().rename({'index': 'column_name'}, axis=1)
df_missing = df_missing[df_missing['percent_missing'] > 0].sort_values(by='percent_missing', ascending=False)
clr = ['steelblue' if percent > 10 else 'skyblue' for percent in df_missing['percent_missing']]
plt.figure(figsize=(10, 20))
sns.barplot(data=df_missing, x='percent_missing', y='column_name', palette=clr)
code
128023684/cell_21
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_third = pd.read_csv('/kaggle/input/retail-case-study-data/Transactions.csv')
md = df_third.groupby(['cust_id', 'prod_cat_code', 'prod_subcat_code'])['transaction_id'].count().to_dict()
df_combined = pd.merge(df_third, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
grp_df = df_combined.groupby(['cust_id', 'prod_cat', 'prod_subcat', 'tran_date'])[['tran_date', 'prod_subcat']].count()
df_third.columns
df_third_part = df_third[df_third.Rate > 0].reset_index()
df_combined = pd.merge(df_third_part, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
combined_pair = df_combined[['prod_cat', 'Store_type']].drop_duplicates().to_dict('records')
contribution_dictionary = {}
for j in range(len(combined_pair)):
    df_sub_part = df_combined[(df_combined.prod_cat == combined_pair[j]['prod_cat']) & (df_combined.Store_type == combined_pair[j]['Store_type'])].reset_index()
    contribution_dictionary[combined_pair[j]['prod_cat'], combined_pair[j]['Store_type']] = np.round(100 * df_sub_part['total_amt'].sum() / df_combined['total_amt'].sum(), 2)
import operator
sorted_x = {k: v for k, v in sorted(contribution_dictionary.items(), key=lambda item: item[1], reverse=True)}
sorted_x
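# The dict comprehension above is the usual sort-a-dict-by-value idiom; a minimal
# sketch with made-up numbers:
#   d = {('Books', 'e-Shop'): 7.1, ('Bags', 'MBR'): 3.4, ('Books', 'MBR'): 9.9}
#   {k: v for k, v in sorted(d.items(), key=lambda item: item[1], reverse=True)}
#   # -> {('Books', 'MBR'): 9.9, ('Books', 'e-Shop'): 7.1, ('Bags', 'MBR'): 3.4}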
code
128023684/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_third = pd.read_csv('/kaggle/input/retail-case-study-data/Transactions.csv')
md = df_third.groupby(['cust_id', 'prod_cat_code', 'prod_subcat_code'])['transaction_id'].count().to_dict()
df_third.columns
code
128023684/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_third = pd.read_csv('/kaggle/input/retail-case-study-data/Transactions.csv')
md = df_third.groupby(['cust_id', 'prod_cat_code', 'prod_subcat_code'])['transaction_id'].count().to_dict()
df_combined = pd.merge(df_third, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
df_combined
code
128023684/cell_23
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_third = pd.read_csv('/kaggle/input/retail-case-study-data/Transactions.csv')
md = df_third.groupby(['cust_id', 'prod_cat_code', 'prod_subcat_code'])['transaction_id'].count().to_dict()
df_combined = pd.merge(df_third, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
grp_df = df_combined.groupby(['cust_id', 'prod_cat', 'prod_subcat', 'tran_date'])[['tran_date', 'prod_subcat']].count()
df_third.columns
df_third_part = df_third[df_third.Rate > 0].reset_index()
df_combined = pd.merge(df_third_part, df_first, left_on=['prod_cat_code', 'prod_subcat_code'], right_on=['prod_cat_code', 'prod_sub_cat_code'], how='left')
md = pd.DataFrame(df_combined[['cust_id', 'prod_cat', 'prod_subcat', 'tran_date']].value_counts())
combined_pair = df_combined[['prod_cat', 'Store_type']].drop_duplicates().to_dict('records')
contribution_dictionary = {}
for j in range(len(combined_pair)):
    df_sub_part = df_combined[(df_combined.prod_cat == combined_pair[j]['prod_cat']) & (df_combined.Store_type == combined_pair[j]['Store_type'])].reset_index()
    contribution_dictionary[combined_pair[j]['prod_cat'], combined_pair[j]['Store_type']] = np.round(100 * df_sub_part['total_amt'].sum() / df_combined['total_amt'].sum(), 2)
import operator
sorted_x = {k: v for k, v in sorted(contribution_dictionary.items(), key=lambda item: item[1], reverse=True)}
df_pie = pd.DataFrame({'item and channel': list(sorted_x.keys()), 'contribution': list(sorted_x.values())}, index=list(sorted_x.keys()))
df_pie.plot(kind='pie', legend=None, y='contribution', autopct='%1.0f%%', figsize=(10, 20))
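# Sketch of the plot call semantics (matplotlib via pandas): autopct='%1.0f%%'
# annotates each wedge with its rounded percentage share, and the wedge labels
# are taken from df_pie's index, i.e. the (category, channel) pairs built above.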
code
128023684/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_first
code
128023684/cell_29
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_first = pd.read_csv('/kaggle/input/retail-case-study-data/prod_cat_info.csv')
df_second = pd.read_csv('/kaggle/input/retail-case-study-data/Customer.csv')
df_second.keys()
code