path: string (length 13 to 17)
screenshot_names: sequence (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: stringclasses (1 value)
104124186/cell_8
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt

# x_train, y_train, x_test and y_test are defined in an earlier, uncaptured
# cell of this notebook (see the sketch below).
regressor = LinearRegression()
regressor.fit(x_train, y_train)
y_pred = regressor.predict(x_test)
y_pred
a = x_test
b = y_test
c = x_test
d = y_pred
plt.scatter(a, b)
plt.scatter(c, d)
plt.grid()
plt.show()
code
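Note: every captured cell of notebook 104124186 consumes x_train, y_train, x_test and y_test without ever defining them; the split cell was evidently not captured. A minimal sketch of that assumed cell, where x and y are the reshaped YearsExperience/Salary arrays from cell_16 and the 80/20 ratio and random_state are guesses, not from the source:

# Hypothetical reconstruction of the notebook's missing split cell.
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)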
104124186/cell_16
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd

data = pd.read_csv('../input/salary/Salary_Data.csv')
data
df = pd.DataFrame(data)
df
x = df.YearsExperience.values.reshape(-1, 1)
y = df.Salary.values.reshape(-1, 1)
# x_train, y_train, x_test and y_test come from the assumed split cell (not captured here)
regressor = LinearRegression()
regressor.fit(x_train, y_train)
y_pred = regressor.predict(x_test)
y_pred
compare = pd.DataFrame({'Actual': y_test.flatten(), 'Prediction': y_pred.flatten()})
compare
df2 = pd.DataFrame({'YearsExperience': [1.5, 2.5, 3.5, 4.5, 5], 'Salary': [1, 4, 8, 9, 10]})
df3 = pd.concat([df, df2])  # DataFrame.append was removed in pandas 2.0
df3
code
104124186/cell_3
[ "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/salary/Salary_Data.csv')
data
df = pd.DataFrame(data)
df
code
104124186/cell_24
[ "text_html_output_1.png" ]
YearsExperience = float(input('please enter the years of experience: '))
Salary = 26986.69131674 + 9379.71049195 * YearsExperience
print(Salary)
code
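cell_24 above hard-codes the intercept and slope that cell_10 printed. A sketch that asks the fitted model instead, so the constants cannot drift out of sync; it assumes the `regressor` from the earlier cells is still in scope:

# Same prediction the hard-coded constants approximate, read from the model.
years = float(input('please enter the years of experience: '))
salary = regressor.predict([[years]])[0][0]
print(salary)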
104124186/cell_14
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd

data = pd.read_csv('../input/salary/Salary_Data.csv')
data
df = pd.DataFrame(data)
df
regressor = LinearRegression()
regressor.fit(x_train, y_train)  # x_train/y_train from the assumed earlier split
y_pred = regressor.predict(x_test)
y_pred
compare = pd.DataFrame({'Actual': y_test.flatten(), 'Prediction': y_pred.flatten()})
compare
# Plot the training points against the test-set predictions
a = x_train
b = y_train
c = x_test
d = y_pred
plt.scatter(a, b)
plt.scatter(c, d)
plt.grid()
plt.show()
code
104124186/cell_22
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd

data = pd.read_csv('../input/salary/Salary_Data.csv')
data
df = pd.DataFrame(data)
df
x = df.YearsExperience.values.reshape(-1, 1)
y = df.Salary.values.reshape(-1, 1)
regressor = LinearRegression()
regressor.fit(x_train, y_train)  # x_train/y_train from the assumed earlier split
y_pred = regressor.predict(x_test)
y_pred
compare = pd.DataFrame({'Actual': y_test.flatten(), 'Prediction': y_pred.flatten()})
compare
df2 = pd.DataFrame({'YearsExperience': [1.5, 2.5, 3.5, 4.5, 5], 'Salary': [1, 4, 8, 9, 10]})
df3 = pd.concat([df, df2])  # DataFrame.append was removed in pandas 2.0
df3
train = df3.iloc[:25]
test = df3.iloc[25:]
x_train = df3['YearsExperience'][:30].values.reshape(-1, 1)
y_train = df3['Salary'][:30].values.reshape(-1, 1)
x_test = df3['YearsExperience'][30:].values.reshape(-1, 1)
regressor.fit(x_train, y_train)
y_pred = regressor.predict(x_test)
a = x_train
b = y_train
c = x_test
d = y_pred
plt.scatter(a, b)
plt.scatter(c, d)
plt.grid()
plt.show()
code
104124186/cell_10
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression

# x_train, y_train and x_test come from the assumed split cell (not captured here)
regressor = LinearRegression()
regressor.fit(x_train, y_train)
y_pred = regressor.predict(x_test)
y_pred
print(regressor.intercept_)
print(regressor.coef_)
code
104124186/cell_12
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt

regressor = LinearRegression()
regressor.fit(x_train, y_train)
y_pred = regressor.predict(x_test)
y_pred
plt.scatter(y_test, y_pred)
plt.show()
code
128000238/cell_2
[ "text_plain_output_1.png" ]
!pip install azure-ai-textanalytics --pre
code
128000238/cell_7
[ "text_plain_output_1.png" ]
def create_twitter_url():
    handle = 'nasi goreng'
    max_results = 10
    mrf = 'max_results={}'.format(max_results)
    q = 'query={}'.format(handle)
    url = 'https://api.twitter.com/2/tweets/search/recent?{}&{}'.format(mrf, q)
    return url

create_twitter_url()
code
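The query 'nasi goreng' contains a literal space, which should be percent-encoded before being placed in a URL. A sketch using the standard library's urllib.parse.quote; the default parameters mirror the cell above:

from urllib.parse import quote

def create_twitter_url(handle='nasi goreng', max_results=10):
    # quote() turns 'nasi goreng' into 'nasi%20goreng'
    return 'https://api.twitter.com/2/tweets/search/recent?max_results={}&query={}'.format(max_results, quote(handle))

create_twitter_url()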
128000238/cell_16
[ "text_plain_output_1.png" ]
import ast
import json
import requests
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient
from kaggle_secrets import UserSecretsClient

user_secrets = UserSecretsClient()
azure_endpoint = user_secrets.get_secret('AZURE_LANGUAGE_ENDPOINT')
azure_language_key = user_secrets.get_secret('AZURE_LANGUAGE_KEY')
bearer_token = user_secrets.get_secret('bearer_token')

def create_twitter_url():
    handle = 'nasi goreng'
    max_results = 10
    mrf = 'max_results={}'.format(max_results)
    q = 'query={}'.format(handle)
    url = 'https://api.twitter.com/2/tweets/search/recent?{}&{}'.format(mrf, q)
    return url

def twitter_auth_and_connect(bearer_token, url):
    headers = {'Authorization': 'Bearer {}'.format(bearer_token)}
    response = requests.request('GET', url, headers=headers)
    return response.json()

def lang_data_shape(res_json):
    data_only = res_json['data']
    doc_start = '"documents": {}'.format(data_only)
    str_json = '{' + doc_start + '}'
    dump_doc = json.dumps(str_json)
    doc = json.loads(dump_doc)
    return ast.literal_eval(doc)

def main():
    url = create_twitter_url()
    res_json = twitter_auth_and_connect(bearer_token, url)
    documents = lang_data_shape(res_json)
    text_analytics_client = TextAnalyticsClient(endpoint=azure_endpoint, credential=AzureKeyCredential(azure_language_key))
    result = text_analytics_client.analyze_sentiment(documents['documents'], show_opinion_mining=False)
    doc_result = [doc for doc in result if not doc.is_error]
    for document in doc_result:
        print('\n')
        print(document.sentiment)
        print('=======')
        for sentence in document.sentences:
            print(sentence.text)

if __name__ == '__main__':
    main()
code
17136911/cell_9
[ "text_plain_output_1.png" ]
from keras import layers
from keras.optimizers import Adam
from keras.layers import Input, Dense, Activation, BatchNormalization, Flatten
from keras.layers import Dropout
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from keras.models import Sequential
from keras.layers import Bidirectional
from keras.layers import LSTM
code
17136911/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Bidirectional
from keras.layers import Input, Dense, Activation, BatchNormalization, Flatten
from keras.layers import LSTM
from keras.models import Sequential
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

le = LabelEncoder()
oh = OneHotEncoder(sparse=False)
df = pd.read_csv('../input/train.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index_0', 'atom_index_1', 'type', 'scalar_coupling_constant'])
df_m_coupling_contributions = pd.read_csv('../input/scalar_coupling_contributions.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index_0', 'atom_index_1', 'fc', 'sd', 'pso', 'dso'])
df = pd.merge(df, df_m_coupling_contributions, on=['molecule_name', 'atom_index_0', 'atom_index_1'])
df_m_diple_moments = pd.read_csv('../input/dipole_moments.csv', sep=',', header=0, usecols=['molecule_name', 'X', 'Y', 'Z'])
df = pd.merge(df, df_m_diple_moments, on='molecule_name')
df_m_pot_engy = pd.read_csv('../input/potential_energy.csv', sep=',', header=0, usecols=['molecule_name', 'potential_energy'])
df = pd.merge(df, df_m_pot_engy, on='molecule_name')
df_a_str = pd.read_csv('../input/structures.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index', 'atom', 'x', 'y', 'z'])
f = df_a_str['atom'].values
f = np.reshape(f, (-1, 1))
f = oh.fit_transform(f)
ohdf = pd.DataFrame(f)
df_a_str = pd.concat([df_a_str, ohdf], axis=1)
df_a_str = df_a_str.rename(index=str, columns={0: 'A0', 1: 'A1', 2: 'A2', 3: 'A3', 4: 'A4'})
df_a_str.drop(columns=['atom'], inplace=True)
df_a_mag_sh_tensor = pd.read_csv('../input/magnetic_shielding_tensors.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index', 'XX', 'YX', 'ZX', 'XY', 'YY', 'ZY', 'XZ', 'YZ', 'ZZ'])
df_a_mlkn_charges = pd.read_csv('../input/mulliken_charges.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index', 'mulliken_charge'])
df_a_str = pd.merge(df_a_str, df_a_mag_sh_tensor, on=['molecule_name', 'atom_index'])
df_a_str = pd.merge(df_a_str, df_a_mlkn_charges, on=['molecule_name', 'atom_index'])
df_atom_1_prop = df_a_str.rename(index=str, columns={'atom_index': 'atom_index_0', 'A0': 'A0_0', 'A1': 'A1_0', 'A2': 'A2_0', 'A3': 'A3_0', 'A4': 'A4_0', 'x': 'x_0', 'y': 'y_0', 'z': 'z_0', 'XX': 'XX_0', 'YX': 'YX_0', 'ZX': 'ZX_0', 'XY': 'XY_0', 'YY': 'YY_0', 'ZY': 'ZY_0', 'XZ': 'XZ_0', 'YZ': 'YZ_0', 'ZZ': 'ZZ_0', 'mulliken_charge': 'mulliken_charge_0'})
df = pd.merge(df, df_atom_1_prop, on=['molecule_name', 'atom_index_0'])
df_atom_2_prop = df_a_str.rename(index=str, columns={'atom_index': 'atom_index_1', 'A0': 'A0_1', 'A1': 'A1_1', 'A2': 'A2_1', 'A3': 'A3_1', 'A4': 'A4_1', 'x': 'x_1', 'y': 'y_1', 'z': 'z_1', 'XX': 'XX_1', 'YX': 'YX_1', 'ZX': 'ZX_1', 'XY': 'XY_1', 'YY': 'YY_1', 'ZY': 'ZY_1', 'XZ': 'XZ_1', 'YZ': 'YZ_1', 'ZZ': 'ZZ_1', 'mulliken_charge': 'mulliken_charge_1'})
df = pd.merge(df, df_atom_2_prop, on=['molecule_name', 'atom_index_1'])
ss = StandardScaler()
scaled_cols = ['scalar_coupling_constant', 'fc', 'sd', 'pso', 'dso', 'X', 'Y', 'Z', 'potential_energy', 'x_0', 'y_0', 'z_0', 'A0_0', 'A1_0', 'A2_0', 'A3_0', 'A4_0', 'XX_0', 'YX_0', 'ZX_0', 'XY_0', 'YY_0', 'ZY_0', 'XZ_0', 'YZ_0', 'ZZ_0', 'mulliken_charge_0', 'x_1', 'y_1', 'z_1', 'A0_1', 'A1_1', 'A2_1', 'A3_1', 'A4_1', 'XX_1', 'YX_1', 'ZX_1', 'XY_1', 'YY_1', 'ZY_1', 'XZ_1', 'YZ_1', 'ZZ_1', 'mulliken_charge_1']
df[scaled_cols] = ss.fit_transform(df[scaled_cols])

def build_atom_pairs(name, molecule):
    df = molecule.apply(list)
    atom_pair_y = np.zeros((df.shape[0], 8))
    atom_pair = np.zeros((df.shape[0], 2, 18))
    # NOTE: DataFrame.as_matrix was removed in pandas 1.0; df[cols].values is the modern equivalent.
    atom_pair[:, 0, :] = df.as_matrix(columns=['x_0', 'y_0', 'z_0', 'XX_0', 'YX_0', 'ZX_0', 'XY_0', 'YY_0', 'ZY_0', 'XZ_0', 'YZ_0', 'ZZ_0', 'mulliken_charge_0', 'A0_0', 'A1_0', 'A2_0', 'A3_0', 'A4_0'])
    atom_pair[:, 1, :] = df.as_matrix(columns=['x_1', 'y_1', 'z_1', 'XX_1', 'YX_1', 'ZX_1', 'XY_1', 'YY_1', 'ZY_1', 'XZ_1', 'YZ_1', 'ZZ_1', 'mulliken_charge_1', 'A0_1', 'A1_1', 'A2_1', 'A3_1', 'A4_1'])
    atom_pair_y = df.as_matrix(columns=['potential_energy', 'X', 'Y', 'Z', 'fc', 'sd', 'pso', 'dso'])
    return (atom_pair, atom_pair_y)

moleculelist = []
molecule_ylist = []
molecules = df.groupby('molecule_name')
c = 0
for name, molecule in molecules:
    atoms, molecule_y = build_atom_pairs(name, molecule)
    amolecule = np.zeros((650, atoms.shape[1], atoms.shape[2]))
    amolecule[:atoms.shape[0], :atoms.shape[1], :atoms.shape[2]] = atoms
    amolecule = amolecule.transpose([0, 2, 1]).reshape(amolecule.shape[0], -1)
    amolecule_y = np.zeros((650, molecule_y.shape[1]))
    amolecule_y[:molecule_y.shape[0], :molecule_y.shape[1]] = molecule_y
    moleculelist.append(amolecule)
    molecule_ylist.append(amolecule_y)
    c = c + 1
    if c > 10000:
        break

def BRNNModel(inputdim):
    model = Sequential()
    model.add(Bidirectional(LSTM(100, return_sequences=True, input_dim=inputdim)))
    model.add(Dense(8))
    model.add(Activation('relu'))
    return model

def batch_generator(X, y, batch_size):
    number_of_batches = X.shape[0] / batch_size
    counter = 0
    shuffle_index = np.arange(np.shape(y)[0])
    while 1:
        index_batch = shuffle_index[batch_size * counter:batch_size * (counter + 1)]
        X_batch = X[index_batch, :].todense()
        y_batch = y[index_batch]
        counter += 1
        yield (np.array(X_batch), y_batch)
        if counter > number_of_batches:
            counter = 0

X = np.asarray(moleculelist)
y = np.asarray(molecule_ylist)
model = BRNNModel(X.shape[1])
# NOTE: accuracy is not a meaningful metric for a mean-squared-error regression.
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
# X_train/Y_train/X_test/Y_test never appear in the captured cells; a
# train/test split is assumed (see the sketch after cell_8 below).
model.fit(X_train, Y_train, epochs=30, batch_size=16, verbose=2)
preds = model.evaluate(x=X_test, y=Y_test)
print()
print('Loss = ' + str(preds[0]))
print('Test Accuracy = ' + str(preds[1]))
code
17136911/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from sklearn import linear_model
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder

print(os.listdir('../input'))
code
17136911/cell_8
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

le = LabelEncoder()
oh = OneHotEncoder(sparse=False)
df = pd.read_csv('../input/train.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index_0', 'atom_index_1', 'type', 'scalar_coupling_constant'])
df_m_coupling_contributions = pd.read_csv('../input/scalar_coupling_contributions.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index_0', 'atom_index_1', 'fc', 'sd', 'pso', 'dso'])
df = pd.merge(df, df_m_coupling_contributions, on=['molecule_name', 'atom_index_0', 'atom_index_1'])
df_m_diple_moments = pd.read_csv('../input/dipole_moments.csv', sep=',', header=0, usecols=['molecule_name', 'X', 'Y', 'Z'])
df = pd.merge(df, df_m_diple_moments, on='molecule_name')
df_m_pot_engy = pd.read_csv('../input/potential_energy.csv', sep=',', header=0, usecols=['molecule_name', 'potential_energy'])
df = pd.merge(df, df_m_pot_engy, on='molecule_name')
df_a_str = pd.read_csv('../input/structures.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index', 'atom', 'x', 'y', 'z'])
f = df_a_str['atom'].values
f = np.reshape(f, (-1, 1))
f = oh.fit_transform(f)
ohdf = pd.DataFrame(f)
df_a_str = pd.concat([df_a_str, ohdf], axis=1)
df_a_str = df_a_str.rename(index=str, columns={0: 'A0', 1: 'A1', 2: 'A2', 3: 'A3', 4: 'A4'})
df_a_str.drop(columns=['atom'], inplace=True)
df_a_mag_sh_tensor = pd.read_csv('../input/magnetic_shielding_tensors.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index', 'XX', 'YX', 'ZX', 'XY', 'YY', 'ZY', 'XZ', 'YZ', 'ZZ'])
df_a_mlkn_charges = pd.read_csv('../input/mulliken_charges.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index', 'mulliken_charge'])
df_a_str = pd.merge(df_a_str, df_a_mag_sh_tensor, on=['molecule_name', 'atom_index'])
df_a_str = pd.merge(df_a_str, df_a_mlkn_charges, on=['molecule_name', 'atom_index'])
df_atom_1_prop = df_a_str.rename(index=str, columns={'atom_index': 'atom_index_0', 'A0': 'A0_0', 'A1': 'A1_0', 'A2': 'A2_0', 'A3': 'A3_0', 'A4': 'A4_0', 'x': 'x_0', 'y': 'y_0', 'z': 'z_0', 'XX': 'XX_0', 'YX': 'YX_0', 'ZX': 'ZX_0', 'XY': 'XY_0', 'YY': 'YY_0', 'ZY': 'ZY_0', 'XZ': 'XZ_0', 'YZ': 'YZ_0', 'ZZ': 'ZZ_0', 'mulliken_charge': 'mulliken_charge_0'})
df = pd.merge(df, df_atom_1_prop, on=['molecule_name', 'atom_index_0'])
df_atom_2_prop = df_a_str.rename(index=str, columns={'atom_index': 'atom_index_1', 'A0': 'A0_1', 'A1': 'A1_1', 'A2': 'A2_1', 'A3': 'A3_1', 'A4': 'A4_1', 'x': 'x_1', 'y': 'y_1', 'z': 'z_1', 'XX': 'XX_1', 'YX': 'YX_1', 'ZX': 'ZX_1', 'XY': 'XY_1', 'YY': 'YY_1', 'ZY': 'ZY_1', 'XZ': 'XZ_1', 'YZ': 'YZ_1', 'ZZ': 'ZZ_1', 'mulliken_charge': 'mulliken_charge_1'})
df = pd.merge(df, df_atom_2_prop, on=['molecule_name', 'atom_index_1'])
ss = StandardScaler()
scaled_cols = ['scalar_coupling_constant', 'fc', 'sd', 'pso', 'dso', 'X', 'Y', 'Z', 'potential_energy', 'x_0', 'y_0', 'z_0', 'A0_0', 'A1_0', 'A2_0', 'A3_0', 'A4_0', 'XX_0', 'YX_0', 'ZX_0', 'XY_0', 'YY_0', 'ZY_0', 'XZ_0', 'YZ_0', 'ZZ_0', 'mulliken_charge_0', 'x_1', 'y_1', 'z_1', 'A0_1', 'A1_1', 'A2_1', 'A3_1', 'A4_1', 'XX_1', 'YX_1', 'ZX_1', 'XY_1', 'YY_1', 'ZY_1', 'XZ_1', 'YZ_1', 'ZZ_1', 'mulliken_charge_1']
df[scaled_cols] = ss.fit_transform(df[scaled_cols])

def build_atom_pairs(name, molecule):
    df = molecule.apply(list)
    atom_pair_y = np.zeros((df.shape[0], 8))
    atom_pair = np.zeros((df.shape[0], 2, 18))
    # NOTE: DataFrame.as_matrix was removed in pandas 1.0; df[cols].values is the modern equivalent.
    atom_pair[:, 0, :] = df.as_matrix(columns=['x_0', 'y_0', 'z_0', 'XX_0', 'YX_0', 'ZX_0', 'XY_0', 'YY_0', 'ZY_0', 'XZ_0', 'YZ_0', 'ZZ_0', 'mulliken_charge_0', 'A0_0', 'A1_0', 'A2_0', 'A3_0', 'A4_0'])
    atom_pair[:, 1, :] = df.as_matrix(columns=['x_1', 'y_1', 'z_1', 'XX_1', 'YX_1', 'ZX_1', 'XY_1', 'YY_1', 'ZY_1', 'XZ_1', 'YZ_1', 'ZZ_1', 'mulliken_charge_1', 'A0_1', 'A1_1', 'A2_1', 'A3_1', 'A4_1'])
    atom_pair_y = df.as_matrix(columns=['potential_energy', 'X', 'Y', 'Z', 'fc', 'sd', 'pso', 'dso'])
    return (atom_pair, atom_pair_y)

moleculelist = []
molecule_ylist = []
molecules = df.groupby('molecule_name')
c = 0
for name, molecule in molecules:
    atoms, molecule_y = build_atom_pairs(name, molecule)
    amolecule = np.zeros((650, atoms.shape[1], atoms.shape[2]))
    amolecule[:atoms.shape[0], :atoms.shape[1], :atoms.shape[2]] = atoms
    amolecule = amolecule.transpose([0, 2, 1]).reshape(amolecule.shape[0], -1)
    amolecule_y = np.zeros((650, molecule_y.shape[1]))
    amolecule_y[:molecule_y.shape[0], :molecule_y.shape[1]] = molecule_y
    moleculelist.append(amolecule)
    molecule_ylist.append(amolecule_y)
    c = c + 1
    if c > 10000:
        break
code
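Cells 16, 17 and 19 of this notebook fit and evaluate on X_train/Y_train/X_test/Y_test, which never appear in any captured cell. A hypothetical reconstruction of the missing split; the ratio and random_state are assumptions, not from the source:

# Hypothetical reconstruction of the missing split the later cells rely on.
from sklearn.model_selection import train_test_split

X = np.asarray(moleculelist)
y = np.asarray(molecule_ylist)
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2, random_state=42)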
17136911/cell_16
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Bidirectional
from keras.layers import Input, Dense, Activation, BatchNormalization, Flatten
from keras.layers import LSTM
from keras.models import Sequential
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

le = LabelEncoder()
oh = OneHotEncoder(sparse=False)
df = pd.read_csv('../input/train.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index_0', 'atom_index_1', 'type', 'scalar_coupling_constant'])
df_m_coupling_contributions = pd.read_csv('../input/scalar_coupling_contributions.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index_0', 'atom_index_1', 'fc', 'sd', 'pso', 'dso'])
df = pd.merge(df, df_m_coupling_contributions, on=['molecule_name', 'atom_index_0', 'atom_index_1'])
df_m_diple_moments = pd.read_csv('../input/dipole_moments.csv', sep=',', header=0, usecols=['molecule_name', 'X', 'Y', 'Z'])
df = pd.merge(df, df_m_diple_moments, on='molecule_name')
df_m_pot_engy = pd.read_csv('../input/potential_energy.csv', sep=',', header=0, usecols=['molecule_name', 'potential_energy'])
df = pd.merge(df, df_m_pot_engy, on='molecule_name')
df_a_str = pd.read_csv('../input/structures.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index', 'atom', 'x', 'y', 'z'])
f = df_a_str['atom'].values
f = np.reshape(f, (-1, 1))
f = oh.fit_transform(f)
ohdf = pd.DataFrame(f)
df_a_str = pd.concat([df_a_str, ohdf], axis=1)
df_a_str = df_a_str.rename(index=str, columns={0: 'A0', 1: 'A1', 2: 'A2', 3: 'A3', 4: 'A4'})
df_a_str.drop(columns=['atom'], inplace=True)
df_a_mag_sh_tensor = pd.read_csv('../input/magnetic_shielding_tensors.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index', 'XX', 'YX', 'ZX', 'XY', 'YY', 'ZY', 'XZ', 'YZ', 'ZZ'])
df_a_mlkn_charges = pd.read_csv('../input/mulliken_charges.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index', 'mulliken_charge'])
df_a_str = pd.merge(df_a_str, df_a_mag_sh_tensor, on=['molecule_name', 'atom_index'])
df_a_str = pd.merge(df_a_str, df_a_mlkn_charges, on=['molecule_name', 'atom_index'])
df_atom_1_prop = df_a_str.rename(index=str, columns={'atom_index': 'atom_index_0', 'A0': 'A0_0', 'A1': 'A1_0', 'A2': 'A2_0', 'A3': 'A3_0', 'A4': 'A4_0', 'x': 'x_0', 'y': 'y_0', 'z': 'z_0', 'XX': 'XX_0', 'YX': 'YX_0', 'ZX': 'ZX_0', 'XY': 'XY_0', 'YY': 'YY_0', 'ZY': 'ZY_0', 'XZ': 'XZ_0', 'YZ': 'YZ_0', 'ZZ': 'ZZ_0', 'mulliken_charge': 'mulliken_charge_0'})
df = pd.merge(df, df_atom_1_prop, on=['molecule_name', 'atom_index_0'])
df_atom_2_prop = df_a_str.rename(index=str, columns={'atom_index': 'atom_index_1', 'A0': 'A0_1', 'A1': 'A1_1', 'A2': 'A2_1', 'A3': 'A3_1', 'A4': 'A4_1', 'x': 'x_1', 'y': 'y_1', 'z': 'z_1', 'XX': 'XX_1', 'YX': 'YX_1', 'ZX': 'ZX_1', 'XY': 'XY_1', 'YY': 'YY_1', 'ZY': 'ZY_1', 'XZ': 'XZ_1', 'YZ': 'YZ_1', 'ZZ': 'ZZ_1', 'mulliken_charge': 'mulliken_charge_1'})
df = pd.merge(df, df_atom_2_prop, on=['molecule_name', 'atom_index_1'])
ss = StandardScaler()
scaled_cols = ['scalar_coupling_constant', 'fc', 'sd', 'pso', 'dso', 'X', 'Y', 'Z', 'potential_energy', 'x_0', 'y_0', 'z_0', 'A0_0', 'A1_0', 'A2_0', 'A3_0', 'A4_0', 'XX_0', 'YX_0', 'ZX_0', 'XY_0', 'YY_0', 'ZY_0', 'XZ_0', 'YZ_0', 'ZZ_0', 'mulliken_charge_0', 'x_1', 'y_1', 'z_1', 'A0_1', 'A1_1', 'A2_1', 'A3_1', 'A4_1', 'XX_1', 'YX_1', 'ZX_1', 'XY_1', 'YY_1', 'ZY_1', 'XZ_1', 'YZ_1', 'ZZ_1', 'mulliken_charge_1']
df[scaled_cols] = ss.fit_transform(df[scaled_cols])

def build_atom_pairs(name, molecule):
    df = molecule.apply(list)
    atom_pair_y = np.zeros((df.shape[0], 8))
    atom_pair = np.zeros((df.shape[0], 2, 18))
    # NOTE: DataFrame.as_matrix was removed in pandas 1.0; df[cols].values is the modern equivalent.
    atom_pair[:, 0, :] = df.as_matrix(columns=['x_0', 'y_0', 'z_0', 'XX_0', 'YX_0', 'ZX_0', 'XY_0', 'YY_0', 'ZY_0', 'XZ_0', 'YZ_0', 'ZZ_0', 'mulliken_charge_0', 'A0_0', 'A1_0', 'A2_0', 'A3_0', 'A4_0'])
    atom_pair[:, 1, :] = df.as_matrix(columns=['x_1', 'y_1', 'z_1', 'XX_1', 'YX_1', 'ZX_1', 'XY_1', 'YY_1', 'ZY_1', 'XZ_1', 'YZ_1', 'ZZ_1', 'mulliken_charge_1', 'A0_1', 'A1_1', 'A2_1', 'A3_1', 'A4_1'])
    atom_pair_y = df.as_matrix(columns=['potential_energy', 'X', 'Y', 'Z', 'fc', 'sd', 'pso', 'dso'])
    return (atom_pair, atom_pair_y)

moleculelist = []
molecule_ylist = []
molecules = df.groupby('molecule_name')
c = 0
for name, molecule in molecules:
    atoms, molecule_y = build_atom_pairs(name, molecule)
    amolecule = np.zeros((650, atoms.shape[1], atoms.shape[2]))
    amolecule[:atoms.shape[0], :atoms.shape[1], :atoms.shape[2]] = atoms
    amolecule = amolecule.transpose([0, 2, 1]).reshape(amolecule.shape[0], -1)
    amolecule_y = np.zeros((650, molecule_y.shape[1]))
    amolecule_y[:molecule_y.shape[0], :molecule_y.shape[1]] = molecule_y
    moleculelist.append(amolecule)
    molecule_ylist.append(amolecule_y)
    c = c + 1
    if c > 10000:
        break

def BRNNModel(inputdim):
    model = Sequential()
    model.add(Bidirectional(LSTM(100, return_sequences=True, input_dim=inputdim)))
    model.add(Dense(8))
    model.add(Activation('relu'))
    return model

def batch_generator(X, y, batch_size):
    number_of_batches = X.shape[0] / batch_size
    counter = 0
    shuffle_index = np.arange(np.shape(y)[0])
    while 1:
        index_batch = shuffle_index[batch_size * counter:batch_size * (counter + 1)]
        X_batch = X[index_batch, :].todense()
        y_batch = y[index_batch]
        counter += 1
        yield (np.array(X_batch), y_batch)
        if counter > number_of_batches:
            counter = 0

X = np.asarray(moleculelist)
y = np.asarray(molecule_ylist)
model = BRNNModel(X.shape[1])
# NOTE: accuracy is not a meaningful metric for a mean-squared-error regression.
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
code
17136911/cell_17
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Bidirectional
from keras.layers import Input, Dense, Activation, BatchNormalization, Flatten
from keras.layers import LSTM
from keras.models import Sequential
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

le = LabelEncoder()
oh = OneHotEncoder(sparse=False)
df = pd.read_csv('../input/train.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index_0', 'atom_index_1', 'type', 'scalar_coupling_constant'])
df_m_coupling_contributions = pd.read_csv('../input/scalar_coupling_contributions.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index_0', 'atom_index_1', 'fc', 'sd', 'pso', 'dso'])
df = pd.merge(df, df_m_coupling_contributions, on=['molecule_name', 'atom_index_0', 'atom_index_1'])
df_m_diple_moments = pd.read_csv('../input/dipole_moments.csv', sep=',', header=0, usecols=['molecule_name', 'X', 'Y', 'Z'])
df = pd.merge(df, df_m_diple_moments, on='molecule_name')
df_m_pot_engy = pd.read_csv('../input/potential_energy.csv', sep=',', header=0, usecols=['molecule_name', 'potential_energy'])
df = pd.merge(df, df_m_pot_engy, on='molecule_name')
df_a_str = pd.read_csv('../input/structures.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index', 'atom', 'x', 'y', 'z'])
f = df_a_str['atom'].values
f = np.reshape(f, (-1, 1))
f = oh.fit_transform(f)
ohdf = pd.DataFrame(f)
df_a_str = pd.concat([df_a_str, ohdf], axis=1)
df_a_str = df_a_str.rename(index=str, columns={0: 'A0', 1: 'A1', 2: 'A2', 3: 'A3', 4: 'A4'})
df_a_str.drop(columns=['atom'], inplace=True)
df_a_mag_sh_tensor = pd.read_csv('../input/magnetic_shielding_tensors.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index', 'XX', 'YX', 'ZX', 'XY', 'YY', 'ZY', 'XZ', 'YZ', 'ZZ'])
df_a_mlkn_charges = pd.read_csv('../input/mulliken_charges.csv', sep=',', header=0, usecols=['molecule_name', 'atom_index', 'mulliken_charge'])
df_a_str = pd.merge(df_a_str, df_a_mag_sh_tensor, on=['molecule_name', 'atom_index'])
df_a_str = pd.merge(df_a_str, df_a_mlkn_charges, on=['molecule_name', 'atom_index'])
df_atom_1_prop = df_a_str.rename(index=str, columns={'atom_index': 'atom_index_0', 'A0': 'A0_0', 'A1': 'A1_0', 'A2': 'A2_0', 'A3': 'A3_0', 'A4': 'A4_0', 'x': 'x_0', 'y': 'y_0', 'z': 'z_0', 'XX': 'XX_0', 'YX': 'YX_0', 'ZX': 'ZX_0', 'XY': 'XY_0', 'YY': 'YY_0', 'ZY': 'ZY_0', 'XZ': 'XZ_0', 'YZ': 'YZ_0', 'ZZ': 'ZZ_0', 'mulliken_charge': 'mulliken_charge_0'})
df = pd.merge(df, df_atom_1_prop, on=['molecule_name', 'atom_index_0'])
df_atom_2_prop = df_a_str.rename(index=str, columns={'atom_index': 'atom_index_1', 'A0': 'A0_1', 'A1': 'A1_1', 'A2': 'A2_1', 'A3': 'A3_1', 'A4': 'A4_1', 'x': 'x_1', 'y': 'y_1', 'z': 'z_1', 'XX': 'XX_1', 'YX': 'YX_1', 'ZX': 'ZX_1', 'XY': 'XY_1', 'YY': 'YY_1', 'ZY': 'ZY_1', 'XZ': 'XZ_1', 'YZ': 'YZ_1', 'ZZ': 'ZZ_1', 'mulliken_charge': 'mulliken_charge_1'})
df = pd.merge(df, df_atom_2_prop, on=['molecule_name', 'atom_index_1'])
ss = StandardScaler()
scaled_cols = ['scalar_coupling_constant', 'fc', 'sd', 'pso', 'dso', 'X', 'Y', 'Z', 'potential_energy', 'x_0', 'y_0', 'z_0', 'A0_0', 'A1_0', 'A2_0', 'A3_0', 'A4_0', 'XX_0', 'YX_0', 'ZX_0', 'XY_0', 'YY_0', 'ZY_0', 'XZ_0', 'YZ_0', 'ZZ_0', 'mulliken_charge_0', 'x_1', 'y_1', 'z_1', 'A0_1', 'A1_1', 'A2_1', 'A3_1', 'A4_1', 'XX_1', 'YX_1', 'ZX_1', 'XY_1', 'YY_1', 'ZY_1', 'XZ_1', 'YZ_1', 'ZZ_1', 'mulliken_charge_1']
df[scaled_cols] = ss.fit_transform(df[scaled_cols])

def build_atom_pairs(name, molecule):
    df = molecule.apply(list)
    atom_pair_y = np.zeros((df.shape[0], 8))
    atom_pair = np.zeros((df.shape[0], 2, 18))
    # NOTE: DataFrame.as_matrix was removed in pandas 1.0; df[cols].values is the modern equivalent.
    atom_pair[:, 0, :] = df.as_matrix(columns=['x_0', 'y_0', 'z_0', 'XX_0', 'YX_0', 'ZX_0', 'XY_0', 'YY_0', 'ZY_0', 'XZ_0', 'YZ_0', 'ZZ_0', 'mulliken_charge_0', 'A0_0', 'A1_0', 'A2_0', 'A3_0', 'A4_0'])
    atom_pair[:, 1, :] = df.as_matrix(columns=['x_1', 'y_1', 'z_1', 'XX_1', 'YX_1', 'ZX_1', 'XY_1', 'YY_1', 'ZY_1', 'XZ_1', 'YZ_1', 'ZZ_1', 'mulliken_charge_1', 'A0_1', 'A1_1', 'A2_1', 'A3_1', 'A4_1'])
    atom_pair_y = df.as_matrix(columns=['potential_energy', 'X', 'Y', 'Z', 'fc', 'sd', 'pso', 'dso'])
    return (atom_pair, atom_pair_y)

moleculelist = []
molecule_ylist = []
molecules = df.groupby('molecule_name')
c = 0
for name, molecule in molecules:
    atoms, molecule_y = build_atom_pairs(name, molecule)
    amolecule = np.zeros((650, atoms.shape[1], atoms.shape[2]))
    amolecule[:atoms.shape[0], :atoms.shape[1], :atoms.shape[2]] = atoms
    amolecule = amolecule.transpose([0, 2, 1]).reshape(amolecule.shape[0], -1)
    amolecule_y = np.zeros((650, molecule_y.shape[1]))
    amolecule_y[:molecule_y.shape[0], :molecule_y.shape[1]] = molecule_y
    moleculelist.append(amolecule)
    molecule_ylist.append(amolecule_y)
    c = c + 1
    if c > 10000:
        break

def BRNNModel(inputdim):
    model = Sequential()
    model.add(Bidirectional(LSTM(100, return_sequences=True, input_dim=inputdim)))
    model.add(Dense(8))
    model.add(Activation('relu'))
    return model

def batch_generator(X, y, batch_size):
    number_of_batches = X.shape[0] / batch_size
    counter = 0
    shuffle_index = np.arange(np.shape(y)[0])
    while 1:
        index_batch = shuffle_index[batch_size * counter:batch_size * (counter + 1)]
        X_batch = X[index_batch, :].todense()
        y_batch = y[index_batch]
        counter += 1
        yield (np.array(X_batch), y_batch)
        if counter > number_of_batches:
            counter = 0

X = np.asarray(moleculelist)
y = np.asarray(molecule_ylist)
model = BRNNModel(X.shape[1])
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
# X_train/Y_train never appear in the captured cells; a train/test split is
# assumed (see the sketch after cell_8 above).
model.fit(X_train, Y_train, epochs=30, batch_size=16, verbose=2)
code
16118732/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import glob
import os
import cv2
import numpy as np
import pandas as pd
import pydicom
from PIL import Image

inputdir = '../input/sample images/'
outdir = './'

# 1) DICOM -> PNG via OpenCV
test_list = [os.path.basename(x) for x in glob.glob(inputdir + './*.dcm')]
for f in test_list:
    ds = pydicom.read_file(inputdir + f)
    img = ds.pixel_array
    cv2.imwrite(outdir + f.replace('.dcm', '.png'), img)

# 2) DICOM -> PNG via Pillow
test_list = [os.path.basename(x) for x in glob.glob(inputdir + './*.dcm')]
for f in test_list:
    ds = pydicom.read_file(inputdir + f)
    img = ds.pixel_array
    img_mem = Image.fromarray(img)
    img_mem.save(outdir + f.replace('.dcm', '.png'))

# 3) DICOM -> JPEG 2000 via Pillow
test_list = [os.path.basename(x) for x in glob.glob(inputdir + './*.dcm')]
for f in test_list:
    ds = pydicom.read_file(inputdir + f)
    img = ds.pixel_array
    img_mem = Image.fromarray(img)
    img_mem.save(outdir + f.replace('.dcm', '.jp2'))
code
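DICOM pixel arrays are frequently 12- or 16-bit, which Pillow and many PNG viewers handle poorly without rescaling. A sketch of a min-max normalization to 8-bit before saving; the scaling choice is an assumption, not part of the original cell:

import numpy as np
from PIL import Image

def to_uint8(img):
    # Scale the dynamic range to 0-255 and cast down to 8-bit.
    img = img.astype(np.float32) - img.min()
    if img.max() > 0:
        img = img / img.max()
    return (img * 255).astype(np.uint8)

# Usage: Image.fromarray(to_uint8(ds.pixel_array)).save('out.png')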
16118732/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import glob  # used below but missing from the captured imports
import os
import cv2
import numpy as np
import pandas as pd
import pydicom

inputdir = '../input/sample images/'
outdir = './'
test_list = [os.path.basename(x) for x in glob.glob(inputdir + './*.dcm')]
for f in test_list:
    ds = pydicom.read_file(inputdir + f)
    img = ds.pixel_array
    cv2.imwrite(outdir + f.replace('.dcm', '.png'), img)
code
16118732/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

print(os.listdir('../input/sample images'))
code
323056/cell_4
[ "text_plain_output_1.png" ]
imgs.keys()
(imgs[1].shape, masks[1].shape)
code
323056/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt  # used below but missing from the captured imports

def load_cv2_images(folder):
    imgs, masks, img_ids = ({}, {}, {})
    for i in range(47):
        imgs[i + 1] = []
        masks[i + 1] = []
        img_ids[i + 1] = []
    paths = glob.glob(folder + '*.tif')
    paths = [p for p in paths if 'mask' not in p]
    for p in paths:
        index = int(p.split('/')[3].split('_')[0])
        try:
            imgs[index].append(cv2.imread(p, 0))
            masks[index].append(cv2.imread(p[:-4] + '_mask.tif', 0))
            img_ids[index].append(p.split('/')[3])
        except:
            pass
    for i in range(47):
        imgs[i + 1] = np.array(imgs[i + 1])
        masks[i + 1] = np.array(masks[i + 1])
    return (imgs, masks, img_ids)

imgs, masks, img_ids = load_cv2_images('../input/train/')
imgs.keys()

def find_pairs(compare_img, compare_mask, compare_id, imgs, masks, img_ids, compare_index, matches):
    threshold = 23000000
    for i, (img, mask, img_id) in enumerate(zip(imgs, masks, img_ids)):
        if np.abs(compare_img - img).sum() < threshold and i != compare_index and ((compare_mask.sum() == 0) != (mask.sum() == 0)):
            matches.append((compare_img, compare_mask, compare_id, img, mask, img_id))
    return matches

matches = []
for j in range(47):
    for i, (img, mask, img_id) in enumerate(zip(imgs[j + 1], masks[j + 1], img_ids[j + 1])):
        matches = find_pairs(img, mask, img_id, imgs[j + 1], masks[j + 1], img_ids[j + 1], i, matches)
len(matches)
repeats, unique = [], []
for i, m in enumerate(matches):
    if m[0].sum() not in repeats or m[3].sum() not in repeats:
        unique.append(m[0].sum())
        fig, ax = plt.subplots(2, 2)
        if m[1].sum() == 0:
            i1, i2 = 1, 0
        else:
            i1, i2 = 0, 1
        ax[i1][0].imshow(m[0], cmap='hot')
        ax[i1][0].set_title(m[2])
        ax[i1][1].imshow(m[1], cmap='hot')
        ax[i1][1].set_title(m[2][:-4] + '_mask.tif')
        ax[i2][0].imshow(m[3], cmap='hot')
        ax[i2][0].set_title(m[5])
        ax[i2][1].imshow(m[4], cmap='hot')
        ax[i2][1].set_title(m[5][:-4] + '_mask.tif')
        fig.subplots_adjust(hspace=0.4)
        plt.show()
        repeats.append(m[0].sum())
        repeats.append(m[3].sum())
code
323056/cell_7
[ "text_plain_output_1.png" ]
import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt  # used below but missing from the captured imports

def load_cv2_images(folder):
    imgs, masks, img_ids = ({}, {}, {})
    for i in range(47):
        imgs[i + 1] = []
        masks[i + 1] = []
        img_ids[i + 1] = []
    paths = glob.glob(folder + '*.tif')
    paths = [p for p in paths if 'mask' not in p]
    for p in paths:
        index = int(p.split('/')[3].split('_')[0])
        try:
            imgs[index].append(cv2.imread(p, 0))
            masks[index].append(cv2.imread(p[:-4] + '_mask.tif', 0))
            img_ids[index].append(p.split('/')[3])
        except:
            pass
    for i in range(47):
        imgs[i + 1] = np.array(imgs[i + 1])
        masks[i + 1] = np.array(masks[i + 1])
    return (imgs, masks, img_ids)

imgs, masks, img_ids = load_cv2_images('../input/train/')
imgs.keys()

def find_pairs(compare_img, compare_mask, compare_id, imgs, masks, img_ids, compare_index, matches):
    threshold = 23000000
    for i, (img, mask, img_id) in enumerate(zip(imgs, masks, img_ids)):
        if np.abs(compare_img - img).sum() < threshold and i != compare_index and ((compare_mask.sum() == 0) != (mask.sum() == 0)):
            matches.append((compare_img, compare_mask, compare_id, img, mask, img_id))
    return matches

matches = []
for j in range(47):
    for i, (img, mask, img_id) in enumerate(zip(imgs[j + 1], masks[j + 1], img_ids[j + 1])):
        matches = find_pairs(img, mask, img_id, imgs[j + 1], masks[j + 1], img_ids[j + 1], i, matches)
len(matches)

# Print the matches, avoiding duplicates
repeats, unique = [], []
for i, m in enumerate(matches):
    # Using pixel sums as an ID for the picture
    if m[0].sum() not in repeats or m[3].sum() not in repeats:
        unique.append(m[0].sum())
        fig, ax = plt.subplots(2, 2)
        if m[1].sum() == 0:
            i1, i2 = 1, 0
        else:
            i1, i2 = 0, 1
        ax[i1][0].imshow(m[0], cmap='hot')
        ax[i1][0].set_title(m[2])
        ax[i1][1].imshow(m[1], cmap='hot')
        ax[i1][1].set_title(m[2][:-4] + '_mask.tif')
        ax[i2][0].imshow(m[3], cmap='hot')
        ax[i2][0].set_title(m[5])
        ax[i2][1].imshow(m[4], cmap='hot')
        ax[i2][1].set_title(m[5][:-4] + '_mask.tif')
        fig.subplots_adjust(hspace=0.4)
        plt.show()
        repeats.append(m[0].sum())
        repeats.append(m[3].sum())
len(unique)
code
323056/cell_3
[ "text_plain_output_1.png" ]
imgs.keys()
code
323056/cell_5
[ "text_plain_output_1.png" ]
import cv2
import glob
import numpy as np

def load_cv2_images(folder):
    imgs, masks, img_ids = ({}, {}, {})
    for i in range(47):
        imgs[i + 1] = []
        masks[i + 1] = []
        img_ids[i + 1] = []
    paths = glob.glob(folder + '*.tif')
    paths = [p for p in paths if 'mask' not in p]
    for p in paths:
        index = int(p.split('/')[3].split('_')[0])
        try:
            imgs[index].append(cv2.imread(p, 0))
            masks[index].append(cv2.imread(p[:-4] + '_mask.tif', 0))
            img_ids[index].append(p.split('/')[3])
        except:
            pass
    for i in range(47):
        imgs[i + 1] = np.array(imgs[i + 1])
        masks[i + 1] = np.array(masks[i + 1])
    return (imgs, masks, img_ids)

imgs, masks, img_ids = load_cv2_images('../input/train/')
imgs.keys()

def find_pairs(compare_img, compare_mask, compare_id, imgs, masks, img_ids, compare_index, matches):
    threshold = 23000000
    for i, (img, mask, img_id) in enumerate(zip(imgs, masks, img_ids)):
        # NOTE: this difference is computed on uint8 arrays, which wrap around
        # on subtraction; see the sketch after this cell.
        if np.abs(compare_img - img).sum() < threshold and i != compare_index and ((compare_mask.sum() == 0) != (mask.sum() == 0)):
            matches.append((compare_img, compare_mask, compare_id, img, mask, img_id))
    return matches

matches = []
for j in range(47):
    for i, (img, mask, img_id) in enumerate(zip(imgs[j + 1], masks[j + 1], img_ids[j + 1])):
        matches = find_pairs(img, mask, img_id, imgs[j + 1], masks[j + 1], img_ids[j + 1], i, matches)
len(matches)
code
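find_pairs subtracts two images as returned by cv2.imread, i.e. uint8 arrays, and uint8 arithmetic wraps around (0 - 1 == 255), which distorts the difference score. A sketch of the overflow-safe comparison; the threshold is the notebook's own value:

import numpy as np

def safe_diff(a, b):
    # Cast to a signed type before differencing; uint8 subtraction wraps around.
    return np.abs(a.astype(np.int16) - b.astype(np.int16)).sum()

# Usage inside find_pairs: safe_diff(compare_img, img) < 23000000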
72071704/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import seaborn as sns

df = pd.read_csv('../input/covid19s-impact-on-airport-traffic/covid_impact_on_airport_traffic.csv')
df.isnull().sum()
df1 = df.groupby('Country')['PercentOfBaseline'].mean().sort_values(ascending=False).reset_index()
sns.set(font_scale=1.2)
df1 = df.groupby('City')['PercentOfBaseline'].mean().sort_values(ascending=False).reset_index()
sns.set(font_scale=1.2)
df1 = df.groupby('State')['PercentOfBaseline'].mean().sort_values(ascending=False).reset_index()
sns.set(font_scale=1.2)
df1 = df.groupby('AirportName')['PercentOfBaseline'].mean().sort_values(ascending=False).reset_index()
sns.set(font_scale=1.2)
df['lon'] = df.Centroid.apply(lambda x: x.split(' ')[0].replace('POINT(', ' '))
df['lat'] = df.Centroid.apply(lambda x: x.split(' ')[1].replace(')', ' '))
df1 = df.groupby(['Country', 'City', 'lat', 'lon'])['PercentOfBaseline'].mean().sort_values(ascending=False).reset_index()
fig = px.scatter_geo(df1, lat='lat', lon='lon', hover_name='Country', color='Country', hover_data=['PercentOfBaseline', 'City'], labels={'PercentOfBaseline': 'Percent of Baseline'})
fig.update_geos(showocean=True, oceancolor='LightCyan', lakecolor='LightSteelBlue', showlakes=True)
fig.show()
code
72071704/cell_9
[ "text_html_output_2.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/covid19s-impact-on-airport-traffic/covid_impact_on_airport_traffic.csv')
df.isnull().sum()
df1 = df.groupby('Country')['PercentOfBaseline'].mean().sort_values(ascending=False).reset_index()
sns.set(font_scale=1.2)
df1 = df.groupby('City')['PercentOfBaseline'].mean().sort_values(ascending=False).reset_index()
sns.set(font_scale=1.2)
plt.figure(figsize=[10, 10])
sns.barplot(data=df1, x='PercentOfBaseline', y='City', palette='GnBu')
plt.xlabel('Percent of baseline')
code
72071704/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/covid19s-impact-on-airport-traffic/covid_impact_on_airport_traffic.csv')
df.head()
code
72071704/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/covid19s-impact-on-airport-traffic/covid_impact_on_airport_traffic.csv')
df.isnull().sum()
code
72071704/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/covid19s-impact-on-airport-traffic/covid_impact_on_airport_traffic.csv')
df.isnull().sum()
df1 = df.groupby('Country')['PercentOfBaseline'].mean().sort_values(ascending=False).reset_index()
sns.set(font_scale=1.2)
df1 = df.groupby('City')['PercentOfBaseline'].mean().sort_values(ascending=False).reset_index()
sns.set(font_scale=1.2)
df1 = df.groupby('State')['PercentOfBaseline'].mean().sort_values(ascending=False).reset_index()
sns.set(font_scale=1.2)
df1 = df.groupby('AirportName')['PercentOfBaseline'].mean().sort_values(ascending=False).reset_index()
sns.set(font_scale=1.2)
plt.figure(figsize=[10, 10])
sns.barplot(data=df1, x='PercentOfBaseline', y='AirportName', palette='crest')
plt.xlabel('Percent of baseline')
plt.ylabel('Airport name')
code
72071704/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72071704/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/covid19s-impact-on-airport-traffic/covid_impact_on_airport_traffic.csv')
df.isnull().sum()
df['Country'].unique()
code
72071704/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/covid19s-impact-on-airport-traffic/covid_impact_on_airport_traffic.csv')
df.isnull().sum()
df1 = df.groupby('Country')['PercentOfBaseline'].mean().sort_values(ascending=False).reset_index()
sns.set(font_scale=1.2)
plt.figure(figsize=[10, 7])
sns.barplot(data=df1, x='Country', y='PercentOfBaseline', palette='GnBu_r')
plt.ylabel('Percent of baseline')
code
72071704/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/covid19s-impact-on-airport-traffic/covid_impact_on_airport_traffic.csv')
df.isnull().sum()
df1 = df.groupby('Country')['PercentOfBaseline'].mean().sort_values(ascending=False).reset_index()
sns.set(font_scale=1.2)
df1 = df.groupby('City')['PercentOfBaseline'].mean().sort_values(ascending=False).reset_index()
sns.set(font_scale=1.2)
df1 = df.groupby('State')['PercentOfBaseline'].mean().sort_values(ascending=False).reset_index()
sns.set(font_scale=1.2)
plt.figure(figsize=[10, 10])
sns.barplot(data=df1, x='PercentOfBaseline', y='State', palette='Greens')
plt.xlabel('Percent of baseline')
code
72071704/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/covid19s-impact-on-airport-traffic/covid_impact_on_airport_traffic.csv')
df.info()
code
1004678/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

houseprice_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
houseprice_df.head()
code
1004678/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd

print(check_output(['ls', '../input']).decode('utf8'))
code
1004678/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

houseprice_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
houseprice_df.info()
print('----------------------------')
test_df.info()
code
1004678/cell_5
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # used below but missing from the captured imports

houseprice_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
houseprice_df = houseprice_df.drop(['Alley', 'PoolQC', 'Fence'], axis=1)
test_df = test_df.drop(['Alley', 'PoolQC', 'Fence'], axis=1)
print(pd.value_counts(houseprice_df['MSSubClass'].values, sort=False))
houseprice_df['MSSubClass'].plot(kind='hist', figsize=(15, 3), bins=50, xlim=(0, 100))
# The next line looks pasted from a Titanic kernel: titanic_df and the
# 'Survived' column do not exist in this notebook (see the sketch below).
sns.factorplot('MSSubClass', 'Survived', order=[1, 2, 3], data=titanic_df, size=5)
code
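The last line of cell_5 references a Titanic dataframe and a 'Survived' column that do not exist in this house-prices notebook. A sketch of the analogous plot against this dataset's actual target, SalePrice; the choice of a boxplot is an assumption about the author's intent:

import seaborn as sns

# MSSubClass (a categorical dwelling-type code) against the price target.
sns.boxplot(x='MSSubClass', y='SalePrice', data=houseprice_df)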
89138107/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings

warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))

def high_correlated_cols(dataframe, plot=True, corr_th=0.85):
    corr = dataframe.corr()
    cor_matrix = corr.abs()
    # np.bool was removed in NumPy 1.24; the built-in bool is equivalent here
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        # Only the figure size survives in the captured code; the heatmap
        # call itself appears to have been lost in extraction.
        sns.set(rc={'figure.figsize': (25, 25)})
    return drop_list

cols = ['V' + str(x) for x in range(1, 26)]
high_correlated_cols(train_df[cols], plot=True)
drop_list1 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(26, 51)]
high_correlated_cols(train_df[cols], plot=True)
drop_list2 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(51, 76)]
high_correlated_cols(train_df[cols], plot=True)
drop_list3 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(76, 101)]
high_correlated_cols(train_df[cols], plot=True)
drop_list4 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(101, 126)]
high_correlated_cols(train_df[cols], plot=True)
drop_list5 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(126, 151)]
high_correlated_cols(train_df[cols], plot=True)
drop_list6 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(151, 176)]
high_correlated_cols(train_df[cols], plot=True)
drop_list7 = high_correlated_cols(train_df[cols], plot=False)
code
89138107/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings

warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))

def high_correlated_cols(dataframe, plot=True, corr_th=0.85):
    corr = dataframe.corr()
    cor_matrix = corr.abs()
    # np.bool was removed in NumPy 1.24; the built-in bool is equivalent here
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        sns.set(rc={'figure.figsize': (25, 25)})
    return drop_list

cols = ['V' + str(x) for x in range(1, 26)]
high_correlated_cols(train_df[cols], plot=True)
drop_list1 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(26, 51)]
high_correlated_cols(train_df[cols], plot=True)
drop_list2 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(51, 76)]
high_correlated_cols(train_df[cols], plot=True)
drop_list3 = high_correlated_cols(train_df[cols], plot=False)
code
89138107/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings

warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))

def high_correlated_cols(dataframe, plot=True, corr_th=0.85):
    corr = dataframe.corr()
    cor_matrix = corr.abs()
    # np.bool was removed in NumPy 1.24; the built-in bool is equivalent here
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        sns.set(rc={'figure.figsize': (25, 25)})
    return drop_list

cols = ['V' + str(x) for x in range(1, 26)]
high_correlated_cols(train_df[cols], plot=True)
drop_list1 = high_correlated_cols(train_df[cols], plot=False)
code
89138107/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings

warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))

def high_correlated_cols(dataframe, plot=True, corr_th=0.85):
    corr = dataframe.corr()
    cor_matrix = corr.abs()
    # np.bool was removed in NumPy 1.24; the built-in bool is equivalent here
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        sns.set(rc={'figure.figsize': (25, 25)})
    return drop_list

cols = ['V' + str(x) for x in range(1, 26)]
high_correlated_cols(train_df[cols], plot=True)
drop_list1 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(26, 51)]
high_correlated_cols(train_df[cols], plot=True)
drop_list2 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(51, 76)]
high_correlated_cols(train_df[cols], plot=True)
drop_list3 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(76, 101)]
high_correlated_cols(train_df[cols], plot=True)
drop_list4 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(101, 126)]
high_correlated_cols(train_df[cols], plot=True)
drop_list5 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(126, 151)]
high_correlated_cols(train_df[cols], plot=True)
drop_list6 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(151, 176)]
high_correlated_cols(train_df[cols], plot=True)
drop_list7 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(176, 201)]
high_correlated_cols(train_df[cols], plot=True)
drop_list8 = high_correlated_cols(train_df[cols], plot=False)
code
89138107/cell_20
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings

warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))

def high_correlated_cols(dataframe, plot=True, corr_th=0.85):
    corr = dataframe.corr()
    cor_matrix = corr.abs()
    # np.bool was removed in NumPy 1.24; the built-in bool is equivalent here
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        sns.set(rc={'figure.figsize': (25, 25)})
    return drop_list

cols = ['V' + str(x) for x in range(1, 26)]
high_correlated_cols(train_df[cols], plot=True)
drop_list1 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(26, 51)]
high_correlated_cols(train_df[cols], plot=True)
drop_list2 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(51, 76)]
high_correlated_cols(train_df[cols], plot=True)
drop_list3 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(76, 101)]
high_correlated_cols(train_df[cols], plot=True)
drop_list4 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(101, 126)]
high_correlated_cols(train_df[cols], plot=True)
drop_list5 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(126, 151)]
high_correlated_cols(train_df[cols], plot=True)
drop_list6 = high_correlated_cols(train_df[cols], plot=False)
drop_list6
code
89138107/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings

warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))

def high_correlated_cols(dataframe, plot=True, corr_th=0.85):
    corr = dataframe.corr()
    cor_matrix = corr.abs()
    # np.bool was removed in NumPy 1.24; the built-in bool is equivalent here
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        sns.set(rc={'figure.figsize': (25, 25)})
    return drop_list

cols = ['V' + str(x) for x in range(1, 26)]
high_correlated_cols(train_df[cols], plot=True)
drop_list1 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(26, 51)]
high_correlated_cols(train_df[cols], plot=True)
drop_list2 = high_correlated_cols(train_df[cols], plot=False)
code
89138107/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))
def high_correlated_cols(dataframe, plot=True, corr_th=0.85):
    corr = dataframe.corr()
    cor_matrix = corr.abs()
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(np.bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        sns.set(rc={'figure.figsize': (25, 25)})
    return drop_list
cols = ['V' + str(x) for x in range(1, 26)]
high_correlated_cols(train_df[cols], plot=True)
drop_list1 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(26, 51)]
high_correlated_cols(train_df[cols], plot=True)
drop_list2 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(51, 76)]
high_correlated_cols(train_df[cols], plot=True)
drop_list3 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(76, 101)]
high_correlated_cols(train_df[cols], plot=True)
drop_list4 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(101, 126)]
high_correlated_cols(train_df[cols], plot=True)
drop_list5 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(126, 151)]
high_correlated_cols(train_df[cols], plot=True)
drop_list6 = high_correlated_cols(train_df[cols], plot=False)
code
89138107/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89138107/cell_18
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))
def high_correlated_cols(dataframe, plot=True, corr_th=0.85):
    corr = dataframe.corr()
    cor_matrix = corr.abs()
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(np.bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        sns.set(rc={'figure.figsize': (25, 25)})
    return drop_list
cols = ['V' + str(x) for x in range(1, 26)]
high_correlated_cols(train_df[cols], plot=True)
drop_list1 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(26, 51)]
high_correlated_cols(train_df[cols], plot=True)
drop_list2 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(51, 76)]
high_correlated_cols(train_df[cols], plot=True)
drop_list3 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(76, 101)]
high_correlated_cols(train_df[cols], plot=True)
drop_list4 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(101, 126)]
high_correlated_cols(train_df[cols], plot=True)
drop_list5 = high_correlated_cols(train_df[cols], plot=False)
drop_list5
code
89138107/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))
def high_correlated_cols(dataframe, plot=True, corr_th=0.85):
    corr = dataframe.corr()
    cor_matrix = corr.abs()
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(np.bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        sns.set(rc={'figure.figsize': (25, 25)})
    return drop_list
cols = ['V' + str(x) for x in range(1, 26)]
high_correlated_cols(train_df[cols], plot=True)
drop_list1 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(26, 51)]
high_correlated_cols(train_df[cols], plot=True)
drop_list2 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(51, 76)]
high_correlated_cols(train_df[cols], plot=True)
drop_list3 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(76, 101)]
high_correlated_cols(train_df[cols], plot=True)
drop_list4 = high_correlated_cols(train_df[cols], plot=False)
code
89138107/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))
def high_correlated_cols(dataframe, plot=True, corr_th=0.85):
    corr = dataframe.corr()
    cor_matrix = corr.abs()
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(np.bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        sns.set(rc={'figure.figsize': (25, 25)})
    return drop_list
cols = ['V' + str(x) for x in range(1, 26)]
high_correlated_cols(train_df[cols], plot=True)
drop_list1 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(26, 51)]
high_correlated_cols(train_df[cols], plot=True)
drop_list2 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(51, 76)]
high_correlated_cols(train_df[cols], plot=True)
drop_list3 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(76, 101)]
high_correlated_cols(train_df[cols], plot=True)
drop_list4 = high_correlated_cols(train_df[cols], plot=False)
drop_list4
code
89138107/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))
def high_correlated_cols(dataframe, plot=True, corr_th=0.85):
    corr = dataframe.corr()
    cor_matrix = corr.abs()
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(np.bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        sns.set(rc={'figure.figsize': (25, 25)})
    return drop_list
cols = ['V' + str(x) for x in range(1, 26)]
high_correlated_cols(train_df[cols], plot=True)
drop_list1 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(26, 51)]
high_correlated_cols(train_df[cols], plot=True)
drop_list2 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(51, 76)]
high_correlated_cols(train_df[cols], plot=True)
drop_list3 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(76, 101)]
high_correlated_cols(train_df[cols], plot=True)
drop_list4 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(101, 126)]
high_correlated_cols(train_df[cols], plot=True)
drop_list5 = high_correlated_cols(train_df[cols], plot=False)
code
89138107/cell_14
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))
def high_correlated_cols(dataframe, plot=True, corr_th=0.85):
    corr = dataframe.corr()
    cor_matrix = corr.abs()
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(np.bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        sns.set(rc={'figure.figsize': (25, 25)})
    return drop_list
cols = ['V' + str(x) for x in range(1, 26)]
high_correlated_cols(train_df[cols], plot=True)
drop_list1 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(26, 51)]
high_correlated_cols(train_df[cols], plot=True)
drop_list2 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(51, 76)]
high_correlated_cols(train_df[cols], plot=True)
drop_list3 = high_correlated_cols(train_df[cols], plot=False)
drop_list3
code
89138107/cell_22
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))
def high_correlated_cols(dataframe, plot=True, corr_th=0.85):
    corr = dataframe.corr()
    cor_matrix = corr.abs()
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(np.bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        sns.set(rc={'figure.figsize': (25, 25)})
    return drop_list
cols = ['V' + str(x) for x in range(1, 26)]
high_correlated_cols(train_df[cols], plot=True)
drop_list1 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(26, 51)]
high_correlated_cols(train_df[cols], plot=True)
drop_list2 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(51, 76)]
high_correlated_cols(train_df[cols], plot=True)
drop_list3 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(76, 101)]
high_correlated_cols(train_df[cols], plot=True)
drop_list4 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(101, 126)]
high_correlated_cols(train_df[cols], plot=True)
drop_list5 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(126, 151)]
high_correlated_cols(train_df[cols], plot=True)
drop_list6 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(151, 176)]
high_correlated_cols(train_df[cols], plot=True)
drop_list7 = high_correlated_cols(train_df[cols], plot=False)
drop_list7
code
89138107/cell_10
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))
def high_correlated_cols(dataframe, plot=True, corr_th=0.85):
    corr = dataframe.corr()
    cor_matrix = corr.abs()
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(np.bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        sns.set(rc={'figure.figsize': (25, 25)})
    return drop_list
cols = ['V' + str(x) for x in range(1, 26)]
high_correlated_cols(train_df[cols], plot=True)
drop_list1 = high_correlated_cols(train_df[cols], plot=False)
drop_list1
code
89138107/cell_12
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))
def high_correlated_cols(dataframe, plot=True, corr_th=0.85):
    corr = dataframe.corr()
    cor_matrix = corr.abs()
    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(np.bool))
    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]
    if plot:
        sns.set(rc={'figure.figsize': (25, 25)})
    return drop_list
cols = ['V' + str(x) for x in range(1, 26)]
high_correlated_cols(train_df[cols], plot=True)
drop_list1 = high_correlated_cols(train_df[cols], plot=False)
cols = ['V' + str(x) for x in range(26, 51)]
high_correlated_cols(train_df[cols], plot=True)
drop_list2 = high_correlated_cols(train_df[cols], plot=False)
drop_list2
code
32073488/cell_21
[ "text_plain_output_1.png" ]
from keras.preprocessing.sequence import pad_sequences
maxlen = 130
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)
for i in x_train[0:10]:
    print(len(i))
code
32073488/cell_13
[ "text_plain_output_1.png" ]
from keras.datasets import imdb
(x_train, y_train), (x_test, y_test) = imdb.load_data(path='imdb.npz', num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3)
word_index = imdb.get_word_index()
print(type(word_index))
print(len(word_index))
code
32073488/cell_9
[ "image_output_1.png" ]
d = x_train[0]
print(len(d))
code
32073488/cell_25
[ "text_plain_output_1.png" ]
from keras.datasets import imdb
from keras.layers import SimpleRNN, Dense, Activation
from keras.layers.embeddings import Embedding
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
(x_train, y_train), (x_test, y_test) = imdb.load_data(path='imdb.npz', num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3)
word_index = imdb.get_word_index()
num_words = 15000
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=num_words)
maxlen = 130
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)
rnn = Sequential()
rnn.add(Embedding(num_words, 32, input_length=len(x_train[0])))
rnn.add(SimpleRNN(16, input_shape=(num_words, maxlen), return_sequences=False, activation='relu'))
rnn.add(Dense(1))
rnn.add(Activation('sigmoid'))
rnn.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
history = rnn.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=6, batch_size=128, verbose=1)
score = rnn.evaluate(x_test, y_test)
print('Accuracy: %', score[1] * 100)
code
32073488/cell_4
[ "text_plain_output_1.png" ]
import numpy as np
unique, counts = np.unique(y_train, return_counts=True)
print('Y Train distrubution:', dict(zip(unique, counts)))
code
32073488/cell_23
[ "text_plain_output_1.png" ]
from keras.datasets import imdb
from keras.layers import SimpleRNN, Dense, Activation
from keras.layers.embeddings import Embedding
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
(x_train, y_train), (x_test, y_test) = imdb.load_data(path='imdb.npz', num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3)
word_index = imdb.get_word_index()
num_words = 15000
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=num_words)
maxlen = 130
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)
rnn = Sequential()
rnn.add(Embedding(num_words, 32, input_length=len(x_train[0])))
rnn.add(SimpleRNN(16, input_shape=(num_words, maxlen), return_sequences=False, activation='relu'))
rnn.add(Dense(1))
rnn.add(Activation('sigmoid'))
print(rnn.summary())
rnn.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
code
32073488/cell_20
[ "image_output_1.png" ]
from keras.preprocessing.sequence import pad_sequences
maxlen = 130
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)
print(x_train[5])
code
32073488/cell_6
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure()
sns.countplot(y_train)
plt.xlabel('Classes')
plt.ylabel('Freq')
plt.title('y train')
plt.show()
code
32073488/cell_26
[ "text_plain_output_1.png" ]
from keras.datasets import imdb
from keras.layers import SimpleRNN, Dense, Activation
from keras.layers.embeddings import Embedding
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
import matplotlib.pyplot as plt
import seaborn as sns
(x_train, y_train), (x_test, y_test) = imdb.load_data(path='imdb.npz', num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3)
review_len_train = []
review_len_test = []
for i, ii in zip(x_train, x_test):
    review_len_train.append(len(i))
    review_len_test.append(len(ii))
word_index = imdb.get_word_index()
num_words = 15000
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=num_words)
maxlen = 130
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)
rnn = Sequential()
rnn.add(Embedding(num_words, 32, input_length=len(x_train[0])))
rnn.add(SimpleRNN(16, input_shape=(num_words, maxlen), return_sequences=False, activation='relu'))
rnn.add(Dense(1))
rnn.add(Activation('sigmoid'))
rnn.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
history = rnn.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=6, batch_size=128, verbose=1)
plt.figure()
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='test')
plt.title('accuracy')
plt.ylabel('accuracy')
plt.xlabel('epochs')
plt.legend()
plt.show()
code
32073488/cell_2
[ "text_plain_output_1.png" ]
from keras.datasets import imdb
(x_train, y_train), (x_test, y_test) = imdb.load_data(path='imdb.npz', num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3)
code
32073488/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
review_len_train = []
review_len_test = []
for i, ii in zip(x_train, x_test):
    review_len_train.append(len(i))
    review_len_test.append(len(ii))
sns.distplot(review_len_train, hist_kws={'alpha': 0.3})
sns.distplot(review_len_test, hist_kws={'alpha': 0.3})
plt.show()
code
32073488/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from keras.datasets import imdb
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers import SimpleRNN, Dense, Activation
code
32073488/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure()
sns.countplot(y_test)
plt.xlabel('Classes')
plt.ylabel('Freq')
plt.title('y test')
plt.show()
code
32073488/cell_8
[ "image_output_1.png" ]
d = x_train[0]
print(x_train[0])
code
32073488/cell_15
[ "image_output_1.png" ]
from keras.datasets import imdb
(x_train, y_train), (x_test, y_test) = imdb.load_data(path='imdb.npz', num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3)
word_index = imdb.get_word_index()
for keys, values in word_index.items():
    if values == 4:
        print(keys)
code
32073488/cell_16
[ "image_output_1.png" ]
from keras.datasets import imdb
(x_train, y_train), (x_test, y_test) = imdb.load_data(path='imdb.npz', num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3)
word_index = imdb.get_word_index()
for keys, values in word_index.items():
    if values == 123:
        print(keys)
code
32073488/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
print('Y Train Values:', np.unique(y_train))
print('Y Test Values:', np.unique(y_test))
code
32073488/cell_17
[ "text_plain_output_1.png" ]
from keras.datasets import imdb
(x_train, y_train), (x_test, y_test) = imdb.load_data(path='imdb.npz', num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3)
word_index = imdb.get_word_index()
def whatItSay(index=9):
    reverse_index = dict([(value, key) for key, value in word_index.items()])
    decode_review = ' '.join([reverse_index.get(i - 3, '!') for i in x_train[index]])
    print(decode_review)
    print(y_train[index])
    return decode_review
decoded_review = whatItSay()
code
32073488/cell_24
[ "text_plain_output_1.png" ]
from keras.datasets import imdb
from keras.layers import SimpleRNN, Dense, Activation
from keras.layers.embeddings import Embedding
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
(x_train, y_train), (x_test, y_test) = imdb.load_data(path='imdb.npz', num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3)
word_index = imdb.get_word_index()
num_words = 15000
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=num_words)
maxlen = 130
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)
rnn = Sequential()
rnn.add(Embedding(num_words, 32, input_length=len(x_train[0])))
rnn.add(SimpleRNN(16, input_shape=(num_words, maxlen), return_sequences=False, activation='relu'))
rnn.add(Dense(1))
rnn.add(Activation('sigmoid'))
rnn.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
history = rnn.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=6, batch_size=128, verbose=1)
code
32073488/cell_14
[ "text_plain_output_1.png" ]
from keras.datasets import imdb
(x_train, y_train), (x_test, y_test) = imdb.load_data(path='imdb.npz', num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3)
word_index = imdb.get_word_index()
for keys, values in word_index.items():
    if values == 1:
        print(keys)
code
32073488/cell_22
[ "text_plain_output_1.png" ]
from keras.datasets import imdb
(x_train, y_train), (x_test, y_test) = imdb.load_data(path='imdb.npz', num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3)
word_index = imdb.get_word_index()
def whatItSay(index=9):
    reverse_index = dict([(value, key) for key, value in word_index.items()])
    decode_review = ' '.join([reverse_index.get(i - 3, '!') for i in x_train[index]])
    return decode_review
decoded_review = whatItSay()
decoded_review = whatItSay(5)
code
32073488/cell_27
[ "text_plain_output_1.png" ]
from keras.datasets import imdb
from keras.layers import SimpleRNN, Dense, Activation
from keras.layers.embeddings import Embedding
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
import matplotlib.pyplot as plt
import seaborn as sns
(x_train, y_train), (x_test, y_test) = imdb.load_data(path='imdb.npz', num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3)
review_len_train = []
review_len_test = []
for i, ii in zip(x_train, x_test):
    review_len_train.append(len(i))
    review_len_test.append(len(ii))
word_index = imdb.get_word_index()
num_words = 15000
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=num_words)
maxlen = 130
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)
rnn = Sequential()
rnn.add(Embedding(num_words, 32, input_length=len(x_train[0])))
rnn.add(SimpleRNN(16, input_shape=(num_words, maxlen), return_sequences=False, activation='relu'))
rnn.add(Dense(1))
rnn.add(Activation('sigmoid'))
rnn.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
history = rnn.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=6, batch_size=128, verbose=1)
plt.figure()
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.title('acc')
plt.ylabel('Acc')
plt.xlabel('epochs')
plt.legend()
plt.show()
code
32073488/cell_12
[ "text_plain_output_1.png" ]
from scipy import stats
import numpy as np
unique, counts = np.unique(y_train, return_counts=True)
unique, counts = np.unique(y_test, return_counts=True)
review_len_train = []
review_len_test = []
for i, ii in zip(x_train, x_test):
    review_len_train.append(len(i))
    review_len_test.append(len(ii))
print('Train mean:', np.mean(review_len_train))
print('Train median:', np.median(review_len_train))
print('Train mode:', stats.mode(review_len_train))
code
32073488/cell_5
[ "text_plain_output_1.png" ]
import numpy as np
unique, counts = np.unique(y_train, return_counts=True)
unique, counts = np.unique(y_test, return_counts=True)
print('Y Test distrubution:', dict(zip(unique, counts)))
code
73071444/cell_42
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data = train_data.drop(['Cabin', 'Ticket', 'PassengerId'], axis=1)
test_data = test_data.drop(['Cabin', 'Ticket'], axis=1)
combine = [train_data, test_data]
for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
(pd.crosstab(train_data['Title'], train_data['Sex']), pd.crosstab(test_data['Title'], test_data['Sex']))
for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
pd.crosstab(train_data['Title'], train_data['Sex'])
train_data = train_data.drop(['Name'], axis=1)
test_data = test_data.drop(['Name'], axis=1)
combine = [train_data, test_data]
train_data = pd.concat([train_data, pd.get_dummies(train_data['Sex'])], axis=1)
test_data = pd.concat([test_data, pd.get_dummies(test_data['Sex'])], axis=1)
train_data
code
73071444/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data[['Pclass', 'Survived']].groupby('Pclass', as_index=False).mean()
code
73071444/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.describe(include=['O'])
code
73071444/cell_25
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
age_plt = sns.FacetGrid(train_data, col='Survived')
age_plt.map(plt.hist, 'Age', bins=20)
class_age_plt = sns.FacetGrid(train_data, col='Survived', row='Pclass', height=2.8, aspect=2.0)
class_age_plt.map(plt.hist, 'Age', bins=20)
cat = sns.FacetGrid(train_data, row='Embarked', height=2.2, aspect=1.6)
cat.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')
fare_plt = sns.FacetGrid(train_data, col='Survived')
fare_plt.map(plt.hist, 'Fare')
fare_embarked_plt = sns.FacetGrid(train_data, col='Survived', row='Embarked')
fare_embarked_plt.map(sns.barplot, 'Sex', 'Fare', ci=None)
code
73071444/cell_4
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.head()
code
73071444/cell_34
[ "text_html_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data = train_data.drop(['Cabin', 'Ticket', 'PassengerId'], axis=1)
test_data = test_data.drop(['Cabin', 'Ticket'], axis=1)
combine = [train_data, test_data]
for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
(pd.crosstab(train_data['Title'], train_data['Sex']), pd.crosstab(test_data['Title'], test_data['Sex']))
code
73071444/cell_20
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
age_plt = sns.FacetGrid(train_data, col='Survived')
age_plt.map(plt.hist, 'Age', bins=20)
class_age_plt = sns.FacetGrid(train_data, col='Survived', row='Pclass', height=2.8, aspect=2.0)
class_age_plt.map(plt.hist, 'Age', bins=20)
code
73071444/cell_6
[ "text_html_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
test_data.info()
code
73071444/cell_40
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data = train_data.drop(['Cabin', 'Ticket', 'PassengerId'], axis=1)
test_data = test_data.drop(['Cabin', 'Ticket'], axis=1)
train_data = train_data.drop(['Name'], axis=1)
test_data = test_data.drop(['Name'], axis=1)
combine = [train_data, test_data]
train_data.head()
code
73071444/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
age_plt = sns.FacetGrid(train_data, col='Survived')
age_plt.map(plt.hist, 'Age', bins=20)
code
73071444/cell_7
[ "text_html_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
(train_data['Ticket'].unique().shape, test_data['Ticket'].unique().shape)
code
73071444/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.describe()
code
73071444/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data[['SibSp', 'Survived']].groupby('SibSp', as_index=False).mean().sort_values(by='Survived', ascending=False)
code
73071444/cell_16
[ "text_html_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data[['Parch', 'Survived']].groupby('Parch', as_index=False).mean().sort_values(by='Survived', ascending=False)
code
73071444/cell_38
[ "image_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data = train_data.drop(['Cabin', 'Ticket', 'PassengerId'], axis=1)
test_data = test_data.drop(['Cabin', 'Ticket'], axis=1)
train_data[['Title', 'Survived']].groupby('Title', as_index=False).mean()
code
73071444/cell_46
[ "text_plain_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data = train_data.drop(['Cabin', 'Ticket', 'PassengerId'], axis=1)
test_data = test_data.drop(['Cabin', 'Ticket'], axis=1)
combine = [train_data, test_data]
for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
(pd.crosstab(train_data['Title'], train_data['Sex']), pd.crosstab(test_data['Title'], test_data['Sex']))
for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
pd.crosstab(train_data['Title'], train_data['Sex'])
train_data = train_data.drop(['Name'], axis=1)
test_data = test_data.drop(['Name'], axis=1)
combine = [train_data, test_data]
train_data = pd.concat([train_data, pd.get_dummies(train_data['Sex'])], axis=1)
test_data = pd.concat([test_data, pd.get_dummies(test_data['Sex'])], axis=1)
train_data
train_data.drop(['female'], axis=1, inplace=True)
test_data.drop(['female'], axis=1, inplace=True)
train_data = pd.concat([train_data, pd.get_dummies(train_data['Title'])], axis=1)
test_data = pd.concat([test_data, pd.get_dummies(test_data['Title'])], axis=1)
combine = [train_data, test_data]
title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Rare': 5}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
train_data.head()
code
73071444/cell_24
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
age_plt = sns.FacetGrid(train_data, col='Survived')
age_plt.map(plt.hist, 'Age', bins=20)
class_age_plt = sns.FacetGrid(train_data, col='Survived', row='Pclass', height=2.8, aspect=2.0)
class_age_plt.map(plt.hist, 'Age', bins=20)
cat = sns.FacetGrid(train_data, row='Embarked', height=2.2, aspect=1.6)
cat.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')
fare_plt = sns.FacetGrid(train_data, col='Survived')
fare_plt.map(plt.hist, 'Fare')
code
73071444/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data[['Embarked', 'Survived']].groupby('Embarked', as_index=False).mean().sort_values(by='Survived', ascending=False)
code
73071444/cell_22
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
age_plt = sns.FacetGrid(train_data, col='Survived')
age_plt.map(plt.hist, 'Age', bins=20)
class_age_plt = sns.FacetGrid(train_data, col='Survived', row='Pclass', height=2.8, aspect=2.0)
class_age_plt.map(plt.hist, 'Age', bins=20)
cat = sns.FacetGrid(train_data, row='Embarked', height=2.2, aspect=1.6)
cat.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')
code
73071444/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean()
code
73071444/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.info()
code