path: stringlengths 13 to 17
screenshot_names: sequencelengths 1 to 873
code: stringlengths 0 to 40.4k
cell_type: stringclasses 1 value
32063375/cell_5
[ "image_output_1.png" ]
import pandas as pd

hp = pd.read_csv('../input/london-house-prices/hpdemo.csv')
hp
code
121148301/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/sudan-food-and-agriculture/prices_oct_nov_2022.csv', index_col=0, parse_dates=['Date'])
df.duplicated().sum()
df.isnull().sum()
df.isnull().sum()
df
code
121148301/cell_4
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/sudan-food-and-agriculture/prices_oct_nov_2022.csv', index_col=0, parse_dates=['Date'])
df.tail()
code
121148301/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt  # data visualization library
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # data visualization library

df = pd.read_csv('/kaggle/input/sudan-food-and-agriculture/prices_oct_nov_2022.csv', index_col=0, parse_dates=['Date'])
df.duplicated().sum()
df.isnull().sum()
df.isnull().sum()
df.nunique()
top_10_markets = df['Market'].value_counts().nlargest(10)

plt.figure(figsize=(15, 6))
scatt = sns.scatterplot(data=df, x='Price', y='Change', hue='Pricing')
plt.xticks(rotation=60)  # rotate ticks on the scatter figure, not a stray empty one
plt.ylabel('Price Change')
plt.title('Prices VS Prices Change')
plt.show()

top_10_unit = df['Unit'].value_counts().nlargest(10)
plt.figure(figsize=(15, 6))
sns.countplot(data=df, x='Unit', order=top_10_unit.index)
plt.xticks(rotation=60)
plt.title('Top 10 Units on Sudanese Agriculture Markets')
plt.show()
code
121148301/cell_6
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/sudan-food-and-agriculture/prices_oct_nov_2022.csv', index_col=0, parse_dates=['Date'])
df.duplicated().sum()
code
121148301/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/sudan-food-and-agriculture/prices_oct_nov_2022.csv', index_col=0, parse_dates=['Date'])
df.duplicated().sum()
df.isnull().sum()
df.isnull().sum()
code
121148301/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt  # data visualization library
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # data visualization library

df = pd.read_csv('/kaggle/input/sudan-food-and-agriculture/prices_oct_nov_2022.csv', index_col=0, parse_dates=['Date'])
df.duplicated().sum()
df.isnull().sum()
df.isnull().sum()
df.nunique()
top_10_markets = df['Market'].value_counts().nlargest(10)

plt.figure(figsize=(15, 6))
scatt = sns.scatterplot(data=df, x='Price', y='Change', hue='Pricing')
plt.xticks(rotation=60)  # rotate ticks on the scatter figure, not a stray empty one
plt.ylabel('Price Change')
plt.title('Prices VS Prices Change')
plt.show()
code
121148301/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import style

style.use('ggplot')

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
121148301/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/sudan-food-and-agriculture/prices_oct_nov_2022.csv', index_col=0, parse_dates=['Date'])
df.duplicated().sum()
df.isnull().sum()
code
121148301/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt  # data visualization library
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # data visualization library

df = pd.read_csv('/kaggle/input/sudan-food-and-agriculture/prices_oct_nov_2022.csv', index_col=0, parse_dates=['Date'])
df.duplicated().sum()
df.isnull().sum()
df.isnull().sum()
df.nunique()
top_10_markets = df['Market'].value_counts().nlargest(10)

plt.figure(figsize=(15, 6))
sns.countplot(data=df, x='Pricing')
plt.xticks(rotation=60)  # rotate ticks on the countplot figure, not a stray empty one
plt.title('Markets Quantities')
plt.xlabel('Quantities')
plt.show()
code
121148301/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/sudan-food-and-agriculture/prices_oct_nov_2022.csv', index_col=0, parse_dates=['Date'])
df.duplicated().sum()
df.isnull().sum()
df.isnull().sum()
df.nunique()
code
121148301/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt  # data visualization library
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # data visualization library

df = pd.read_csv('/kaggle/input/sudan-food-and-agriculture/prices_oct_nov_2022.csv', index_col=0, parse_dates=['Date'])
df.duplicated().sum()
df.isnull().sum()
df.isnull().sum()
df.nunique()
top_10_markets = df['Market'].value_counts().nlargest(10)

plt.figure(figsize=(15, 6))
sns.countplot(data=df, order=top_10_markets.index, y='Market')
plt.title('Top 10 Markets')
plt.show()
code
121148301/cell_3
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/sudan-food-and-agriculture/prices_oct_nov_2022.csv', index_col=0, parse_dates=['Date'])
df.head()
code
121148301/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt  # data visualization library
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # data visualization library

df = pd.read_csv('/kaggle/input/sudan-food-and-agriculture/prices_oct_nov_2022.csv', index_col=0, parse_dates=['Date'])
df.duplicated().sum()
df.isnull().sum()
df.isnull().sum()
df.nunique()
top_10_markets = df['Market'].value_counts().nlargest(10)

plt.figure(figsize=(15, 6))
df['Product'].value_counts().nlargest(10).plot(kind='bar')
plt.xticks(rotation=60)
plt.xlabel('Products')
plt.ylabel('Transaction Counts')
plt.title('Counts of Top 10 Products')
plt.show()
code
121148301/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/sudan-food-and-agriculture/prices_oct_nov_2022.csv', index_col=0, parse_dates=['Date'])
df.duplicated().sum()
df.isnull().sum()
df.isnull().sum()
df.describe()
code
121148301/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/sudan-food-and-agriculture/prices_oct_nov_2022.csv', index_col=0, parse_dates=['Date'])
df.info()
code
32068527/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32068527/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import Lasso
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')

X_train = train[['Id']]
test['Id'] = test['ForecastId']
X_test = test[['Id']]
y_train_cc = train[['ConfirmedCases']]
y_train_ft = train[['Fatalities']]

X_tr = np.array_split(X_train, 313)
y_cc = np.array_split(y_train_cc, 313)
y_ft = np.array_split(y_train_ft, 313)
X_te = np.array_split(X_test, 313)

a = np.max(X_tr[0]).values
b = a - 71
b = b[0]

poly = PolynomialFeatures(3)
y_pred_cc = []
for i in range(313):
    X_tr[i] = poly.fit_transform(X_tr[i])
    X_te[i] = poly.fit_transform(X_te[i])
    model = Lasso(alpha=0.1)
    model.fit(X_tr[i], y_cc[i])
    y_pr_cc = model.predict(X_te[i])
    y_cc[i] = y_cc[i][71:]
    y_pr_cc = y_pr_cc[b:]
    y_pr_cc = np.append(y_cc[i], y_pr_cc)
    y_pred_cc.append(y_pr_cc)

y_pred_ft = []
for i in range(313):
    model = Lasso()
    model.fit(X_tr[i], y_ft[i])
    y_pr_ft = model.predict(X_te[i])
    y_ft[i] = y_ft[i][71:]
    y_pr_ft = y_pr_ft[b:]
    y_pr_ft = np.append(y_ft[i], y_pr_ft)
    y_pred_ft.append(y_pr_ft)

y_pred_ft_1 = [item for sublist in y_pred_ft for item in sublist]
y_pred_cc_1 = [item for sublist in y_pred_cc for item in sublist]
result = pd.DataFrame({'ForecastId': submission.ForecastId, 'ConfirmedCases': y_pred_cc_1, 'Fatalities': y_pred_ft_1})
result.to_csv('/kaggle/working/submission.csv', index=False)
data = pd.read_csv('/kaggle/working/submission.csv')
data.head(50)
code
32068527/cell_7
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')

plt.figure(figsize=(20, 10))
plt.plot(train.Id, train.ConfirmedCases)
plt.title('Confirmed Cases')
plt.show()
code
32068527/cell_15
[ "text_plain_output_1.png" ]
from sklearn.linear_model import Lasso
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')

X_train = train[['Id']]
test['Id'] = test['ForecastId']
X_test = test[['Id']]
y_train_cc = train[['ConfirmedCases']]
y_train_ft = train[['Fatalities']]

X_tr = np.array_split(X_train, 313)
y_cc = np.array_split(y_train_cc, 313)
y_ft = np.array_split(y_train_ft, 313)
X_te = np.array_split(X_test, 313)

a = np.max(X_tr[0]).values
b = a - 71
b = b[0]

poly = PolynomialFeatures(3)
y_pred_cc = []
for i in range(313):
    X_tr[i] = poly.fit_transform(X_tr[i])
    X_te[i] = poly.fit_transform(X_te[i])
    model = Lasso(alpha=0.1)
    model.fit(X_tr[i], y_cc[i])
    y_pr_cc = model.predict(X_te[i])
    y_cc[i] = y_cc[i][71:]
    y_pr_cc = y_pr_cc[b:]
    y_pr_cc = np.append(y_cc[i], y_pr_cc)
    y_pred_cc.append(y_pr_cc)
code
32068527/cell_16
[ "image_output_1.png" ]
from sklearn.linear_model import Lasso
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')

X_train = train[['Id']]
test['Id'] = test['ForecastId']
X_test = test[['Id']]
y_train_cc = train[['ConfirmedCases']]
y_train_ft = train[['Fatalities']]

X_tr = np.array_split(X_train, 313)
y_cc = np.array_split(y_train_cc, 313)
y_ft = np.array_split(y_train_ft, 313)
X_te = np.array_split(X_test, 313)

a = np.max(X_tr[0]).values
b = a - 71
b = b[0]

poly = PolynomialFeatures(3)
y_pred_cc = []
for i in range(313):
    X_tr[i] = poly.fit_transform(X_tr[i])
    X_te[i] = poly.fit_transform(X_te[i])
    model = Lasso(alpha=0.1)
    model.fit(X_tr[i], y_cc[i])
    y_pr_cc = model.predict(X_te[i])
    y_cc[i] = y_cc[i][71:]
    y_pr_cc = y_pr_cc[b:]
    y_pr_cc = np.append(y_cc[i], y_pr_cc)
    y_pred_cc.append(y_pr_cc)

y_pred_ft = []
for i in range(313):
    model = Lasso()
    model.fit(X_tr[i], y_ft[i])
    y_pr_ft = model.predict(X_te[i])
    y_ft[i] = y_ft[i][71:]
    y_pr_ft = y_pr_ft[b:]
    y_pr_ft = np.append(y_ft[i], y_pr_ft)
    y_pred_ft.append(y_pr_ft)
code
89127002/cell_33
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix, classification_report, cohen_kappa_score
from tensorflow.keras.layers import Conv1D, MaxPooling1D, GlobalMaxPooling1D, GlobalAveragePooling1D
from tensorflow.keras.layers import Dense, Input, Activation, Embedding, Dropout
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

plt.rcParams['figure.figsize'] = [12, 5]
train = pd.read_csv('../input/drugsComTrain_raw.csv')
test = pd.read_csv('../input/drugsComTest_raw.csv')

data = pad_sequences(sequences, maxlen=sequence_length)
ratings = train['rating'].values
labels = 1.0 * (ratings >= 8) + 1.0 * (ratings >= 5)
hot_labels = to_categorical(labels)
hot_labels[:3]

VALIDATION_SPLIT = 0.25
N = int(VALIDATION_SPLIT * data.shape[0])
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
hot_labels = hot_labels[indices]
train_data = data[:-N]
train_cat = hot_labels[:-N]
val_data = data[-N:]
val_cat = hot_labels[-N:]

embedding_dim = 100
model = Sequential([Embedding(max_features + 1, embedding_dim), Dropout(0.25), Conv1D(128, 7, padding='valid', activation='relu', strides=3), GlobalAveragePooling1D(), Dropout(0.25), Dense(128, activation='relu'), Dense(3, activation='softmax')])
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
model.summary()

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
e = np.arange(len(acc)) + 1

pred_labels = np.argmax(model.predict(val_data), axis=1)
val_labels = np.argmax(val_cat, axis=1)
cr = classification_report(val_labels, pred_labels)
k = cohen_kappa_score(val_labels, pred_labels)
print(f"Cohen's kappa (linear) = {k:.4f}")
k2 = cohen_kappa_score(val_labels, pred_labels, weights='quadratic')
print(f"Cohen's kappa (quadratic) = {k2:.4f}")
code
89127002/cell_6
[ "text_plain_output_1.png" ]
import os

print(os.listdir('../input'))
code
89127002/cell_29
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix, classification_report, cohen_kappa_score
from tensorflow.keras.layers import Conv1D, MaxPooling1D, GlobalMaxPooling1D, GlobalAveragePooling1D
from tensorflow.keras.layers import Dense, Input, Activation, Embedding, Dropout
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

plt.rcParams['figure.figsize'] = [12, 5]
train = pd.read_csv('../input/drugsComTrain_raw.csv')
test = pd.read_csv('../input/drugsComTest_raw.csv')

data = pad_sequences(sequences, maxlen=sequence_length)
ratings = train['rating'].values
labels = 1.0 * (ratings >= 8) + 1.0 * (ratings >= 5)
hot_labels = to_categorical(labels)
hot_labels[:3]

VALIDATION_SPLIT = 0.25
N = int(VALIDATION_SPLIT * data.shape[0])
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
hot_labels = hot_labels[indices]
train_data = data[:-N]
train_cat = hot_labels[:-N]
val_data = data[-N:]
val_cat = hot_labels[-N:]

embedding_dim = 100
model = Sequential([Embedding(max_features + 1, embedding_dim), Dropout(0.25), Conv1D(128, 7, padding='valid', activation='relu', strides=3), GlobalAveragePooling1D(), Dropout(0.25), Dense(128, activation='relu'), Dense(3, activation='softmax')])
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
model.summary()

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
e = np.arange(len(acc)) + 1

pred_labels = np.argmax(model.predict(val_data), axis=1)
val_labels = np.argmax(val_cat, axis=1)
cr = classification_report(val_labels, pred_labels)
print(cr)
code
89127002/cell_18
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/drugsComTrain_raw.csv')
test = pd.read_csv('../input/drugsComTest_raw.csv')

ratings = train['rating'].values
labels = 1.0 * (ratings >= 8) + 1.0 * (ratings >= 5)
hot_labels = to_categorical(labels)
print('Shape of label tensor:', hot_labels.shape)
hot_labels[:3]
code
89127002/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/drugsComTrain_raw.csv')
test = pd.read_csv('../input/drugsComTest_raw.csv')
train.head()
code
89127002/cell_3
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import tensorflow as tf
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Input, Activation, Embedding, Dropout
from tensorflow.keras.layers import Conv1D, MaxPooling1D, GlobalMaxPooling1D, GlobalAveragePooling1D
from tensorflow.keras.initializers import Constant
from sklearn.metrics import confusion_matrix, classification_report, cohen_kappa_score
import os

print('tensorflow version:', tf.__version__)
code
89127002/cell_31
[ "image_output_2.png", "image_output_1.png" ]
from sklearn.metrics import confusion_matrix, classification_report, cohen_kappa_score
from tensorflow.keras.layers import Conv1D, MaxPooling1D, GlobalMaxPooling1D, GlobalAveragePooling1D
from tensorflow.keras.layers import Dense, Input, Activation, Embedding, Dropout
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

plt.rcParams['figure.figsize'] = [12, 5]
train = pd.read_csv('../input/drugsComTrain_raw.csv')
test = pd.read_csv('../input/drugsComTest_raw.csv')

data = pad_sequences(sequences, maxlen=sequence_length)
ratings = train['rating'].values
labels = 1.0 * (ratings >= 8) + 1.0 * (ratings >= 5)
hot_labels = to_categorical(labels)
hot_labels[:3]

VALIDATION_SPLIT = 0.25
N = int(VALIDATION_SPLIT * data.shape[0])
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
hot_labels = hot_labels[indices]
train_data = data[:-N]
train_cat = hot_labels[:-N]
val_data = data[-N:]
val_cat = hot_labels[-N:]

embedding_dim = 100
model = Sequential([Embedding(max_features + 1, embedding_dim), Dropout(0.25), Conv1D(128, 7, padding='valid', activation='relu', strides=3), GlobalAveragePooling1D(), Dropout(0.25), Dense(128, activation='relu'), Dense(3, activation='softmax')])
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
model.summary()

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
e = np.arange(len(acc)) + 1

pred_labels = np.argmax(model.predict(val_data), axis=1)
val_labels = np.argmax(val_cat, axis=1)
cr = classification_report(val_labels, pred_labels)
cm = confusion_matrix(val_labels, pred_labels).T
print(cm)
code
89127002/cell_24
[ "text_plain_output_1.png" ]
history = model.fit(train_data, train_cat, batch_size=128, epochs=10, verbose=0, validation_data=(val_data, val_cat))
code
89127002/cell_14
[ "text_plain_output_1.png" ]
data = pad_sequences(sequences, maxlen=sequence_length)
print('Shape of data tensor:', data.shape)
code
89127002/cell_22
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Conv1D, MaxPooling1D, GlobalMaxPooling1D, GlobalAveragePooling1D
from tensorflow.keras.layers import Dense, Input, Activation, Embedding, Dropout

embedding_dim = 100
model = Sequential([
    Embedding(max_features + 1, embedding_dim),
    Dropout(0.25),
    Conv1D(128, 7, padding='valid', activation='relu', strides=3),
    GlobalAveragePooling1D(),
    Dropout(0.25),
    Dense(128, activation='relu'),
    Dense(3, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
model.summary()
code
89127002/cell_27
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

plt.rcParams['figure.figsize'] = [12, 5]
train = pd.read_csv('../input/drugsComTrain_raw.csv')
test = pd.read_csv('../input/drugsComTest_raw.csv')

data = pad_sequences(sequences, maxlen=sequence_length)
ratings = train['rating'].values
labels = 1.0 * (ratings >= 8) + 1.0 * (ratings >= 5)
hot_labels = to_categorical(labels)
hot_labels[:3]

VALIDATION_SPLIT = 0.25
N = int(VALIDATION_SPLIT * data.shape[0])
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
hot_labels = hot_labels[indices]
train_data = data[:-N]
train_cat = hot_labels[:-N]
val_data = data[-N:]
val_cat = hot_labels[-N:]

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
e = np.arange(len(acc)) + 1

plt.plot(e, acc, label='train')
plt.plot(e, val_acc, label='validation')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.grid()
plt.legend()

plt.figure()
plt.plot(e, loss, label='train')
plt.plot(e, val_loss, label='validation')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.grid()
plt.legend()
plt.show()
code
89127002/cell_12
[ "text_plain_output_1.png" ]
max_features = 5000
sequence_length = 200
samples = train['review']

tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(samples)
sequences = tokenizer.texts_to_sequences(samples)
word_index = tokenizer.word_index
print(f'Found {len(word_index)} unique tokens.')
code
2039737/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

train_df[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
2039737/cell_9
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

test_df.describe()
code
2039737/cell_30
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex'])

for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')

title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Rare': 5}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)

train_df = train_df.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
test_df = test_df.drop(['Name', 'Ticket', 'Cabin'], axis=1)
combine = [train_df, test_df]

for dataset in combine:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)

guess_ages = np.zeros((2, 3))
for dataset in combine:
    for i in range(0, 2):
        for j in range(0, 3):
            guess_df = dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j + 1)]['Age'].dropna()
            age_guess = guess_df.median()
            guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
    for i in range(0, 2):
        for j in range(0, 3):
            dataset.loc[dataset.Age.isnull() & (dataset.Sex == i) & (dataset.Pclass == j + 1), 'Age'] = guess_ages[i, j]
    dataset['Age'] = dataset['Age'].astype(int)

for dataset in combine:
    dataset.loc[dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[dataset['Age'] > 64, 'Age'] = 4  # assign the oldest passengers to band 4

train_df.head()
code
2039737/cell_33
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex'])

for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')

title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Rare': 5}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)

train_df = train_df.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
test_df = test_df.drop(['Name', 'Ticket', 'Cabin'], axis=1)
combine = [train_df, test_df]

for dataset in combine:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)

guess_ages = np.zeros((2, 3))
for dataset in combine:
    for i in range(0, 2):
        for j in range(0, 3):
            guess_df = dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j + 1)]['Age'].dropna()
            age_guess = guess_df.median()
            guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
    for i in range(0, 2):
        for j in range(0, 3):
            dataset.loc[dataset.Age.isnull() & (dataset.Sex == i) & (dataset.Pclass == j + 1), 'Age'] = guess_ages[i, j]
    dataset['Age'] = dataset['Age'].astype(int)

for dataset in combine:
    dataset.loc[dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[dataset['Age'] > 64, 'Age'] = 4  # assign the oldest passengers to band 4

train_df = train_df.drop(['AgeBand'], axis=1)
combine = [train_df, test_df]

for dataset in combine:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1

train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
2039737/cell_6
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

train_df.head()
code
2039737/cell_29
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex'])

for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')

title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Rare': 5}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)

train_df = train_df.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
test_df = test_df.drop(['Name', 'Ticket', 'Cabin'], axis=1)
combine = [train_df, test_df]

for dataset in combine:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)

guess_ages = np.zeros((2, 3))
for dataset in combine:
    for i in range(0, 2):
        for j in range(0, 3):
            guess_df = dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j + 1)]['Age'].dropna()
            age_guess = guess_df.median()
            guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
    for i in range(0, 2):
        for j in range(0, 3):
            dataset.loc[dataset.Age.isnull() & (dataset.Sex == i) & (dataset.Pclass == j + 1), 'Age'] = guess_ages[i, j]
    dataset['Age'] = dataset['Age'].astype(int)

train_df['AgeBand'] = pd.cut(train_df['Age'], 5)
train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean().sort_values(by='AgeBand', ascending=True)
code
2039737/cell_26
[ "image_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex'])

for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')

title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Rare': 5}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)

train_df = train_df.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
test_df = test_df.drop(['Name', 'Ticket', 'Cabin'], axis=1)
combine = [train_df, test_df]

for dataset in combine:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)

train_df.head()
code
2039737/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

test_df.describe(include=['O'])
code
2039737/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

grid = sns.FacetGrid(train_df, col='Survived')
grid.map(plt.hist, 'Age', bins=20)

grid = sns.FacetGrid(train_df, col='Survived', row='Pclass', size=2.2, aspect=1.6)
grid.map(plt.hist, 'Age', alpha=0.5, bins=20)
grid.add_legend()

grid = sns.FacetGrid(train_df, row='Embarked', size=2.2, aspect=1.6)
grid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')
grid.add_legend()

grid = sns.FacetGrid(train_df, row='Embarked', col='Survived', size=2.2, aspect=1.6)
grid.map(sns.barplot, 'Sex', 'Fare', alpha=0.5, ci=None)
grid.add_legend()
code
2039737/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

test_df.head()
code
2039737/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

grid = sns.FacetGrid(train_df, col='Survived')
grid.map(plt.hist, 'Age', bins=20)

grid = sns.FacetGrid(train_df, col='Survived', row='Pclass', size=2.2, aspect=1.6)
grid.map(plt.hist, 'Age', alpha=0.5, bins=20)
grid.add_legend()

grid = sns.FacetGrid(train_df, row='Embarked', size=2.2, aspect=1.6)
grid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')
grid.add_legend()
code
2039737/cell_28
[ "image_output_1.png" ]
import numpy as np
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex'])

for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')

title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Rare': 5}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)

train_df = train_df.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
test_df = test_df.drop(['Name', 'Ticket', 'Cabin'], axis=1)
combine = [train_df, test_df]

for dataset in combine:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)

guess_ages = np.zeros((2, 3))
for dataset in combine:
    for i in range(0, 2):
        for j in range(0, 3):
            guess_df = dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j + 1)]['Age'].dropna()
            age_guess = guess_df.median()
            guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
    for i in range(0, 2):
        for j in range(0, 3):
            dataset.loc[dataset.Age.isnull() & (dataset.Sex == i) & (dataset.Pclass == j + 1), 'Age'] = guess_ages[i, j]
    dataset['Age'] = dataset['Age'].astype(int)

train_df.head()
code
2039737/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

train_df.describe()
code
2039737/cell_15
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

train_df[['Parch', 'Survived']].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
2039737/cell_16
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

grid = sns.FacetGrid(train_df, col='Survived')
grid.map(plt.hist, 'Age', bins=20)
code
2039737/cell_3
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from subprocess import check_output
import pandas as pd
import numpy as np
import random as rnd
import sklearn.linear_model
import sklearn.svm
import sklearn.ensemble
import sklearn.neighbors
import sklearn.naive_bayes
import sklearn.tree
import sklearn.neural_network
import seaborn as sns
import matplotlib.pyplot as plt
import os

train_set_size = 891
valid_set_size = 0

print(os.path.dirname(os.getcwd()) + ':', os.listdir(os.path.dirname(os.getcwd())))
print(os.getcwd() + ':', os.listdir(os.getcwd()))
code
2039737/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

grid = sns.FacetGrid(train_df, col='Survived')
grid.map(plt.hist, 'Age', bins=20)

grid = sns.FacetGrid(train_df, col='Survived', row='Pclass', size=2.2, aspect=1.6)
grid.map(plt.hist, 'Age', alpha=0.5, bins=20)
grid.add_legend()
code
2039737/cell_31
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex'])

for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')

title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Rare': 5}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)

train_df = train_df.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
test_df = test_df.drop(['Name', 'Ticket', 'Cabin'], axis=1)
combine = [train_df, test_df]

for dataset in combine:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)

guess_ages = np.zeros((2, 3))
for dataset in combine:
    for i in range(0, 2):
        for j in range(0, 3):
            guess_df = dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j + 1)]['Age'].dropna()
            age_guess = guess_df.median()
            guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
    for i in range(0, 2):
        for j in range(0, 3):
            dataset.loc[dataset.Age.isnull() & (dataset.Sex == i) & (dataset.Pclass == j + 1), 'Age'] = guess_ages[i, j]
    dataset['Age'] = dataset['Age'].astype(int)

for dataset in combine:
    dataset.loc[dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[dataset['Age'] > 64, 'Age'] = 4  # assign the oldest passengers to band 4

train_df = train_df.drop(['AgeBand'], axis=1)
combine = [train_df, test_df]
train_df.head()
code
2039737/cell_24
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex'])

for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')

title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Rare': 5}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)

train_df = train_df.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
test_df = test_df.drop(['Name', 'Ticket', 'Cabin'], axis=1)
combine = [train_df, test_df]

print('train_df = ', train_df.shape)
print('test_df = ', test_df.shape)
code
2039737/cell_14
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

train_df[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
2039737/cell_22
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex'])

for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')

title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Rare': 5}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)

train_df.head()
code
2039737/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

train_df.describe(include=['O'])
code
2039737/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
2039737/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]

train_df.info()
print('_' * 40)
test_df.info()
code
72116842/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

path = '/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv'
df = pd.read_csv(path, header=None)
df.columns = ['target', 'ids', 'date', 'flag', 'user', 'text']
df.sample(10)
df.shape
sns.countplot(df['target'])
code
72116842/cell_30
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import re

path = '/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv'
df = pd.read_csv(path, header=None)
df.columns = ['target', 'ids', 'date', 'flag', 'user', 'text']
df.sample(10)
df.shape

stop_words = stopwords.words('english')
stemmer = SnowballStemmer('english')
cleaning = '@\\S+|https?:\\S+|http?:\\S|[^A-Za-z0-9]+'

def preprocess(text, stem=False):
    text = re.sub(cleaning, ' ', str(text).lower()).strip()
    tokens = []
    for token in text.split():
        if token not in stop_words:
            if stem:
                tokens.append(stemmer.stem(token))
            else:
                tokens.append(token)
    return ' '.join(tokens)

df.text = df.text.apply(lambda x: preprocess(x, stem=True))

x = df.text
y = df.target
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=42)
df
code
72116842/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv'
df = pd.read_csv(path, header=None)
df.columns = ['target', 'ids', 'date', 'flag', 'user', 'text']
df.info()
code
72116842/cell_29
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import re

path = '/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv'
df = pd.read_csv(path, header=None)
df.columns = ['target', 'ids', 'date', 'flag', 'user', 'text']
df.sample(10)
df.shape

stop_words = stopwords.words('english')
stemmer = SnowballStemmer('english')
cleaning = '@\\S+|https?:\\S+|http?:\\S|[^A-Za-z0-9]+'

def preprocess(text, stem=False):
    text = re.sub(cleaning, ' ', str(text).lower()).strip()
    tokens = []
    for token in text.split():
        if token not in stop_words:
            if stem:
                tokens.append(stemmer.stem(token))
            else:
                tokens.append(token)
    return ' '.join(tokens)

df.text = df.text.apply(lambda x: preprocess(x, stem=True))

x = df.text
y = df.target
print(x.shape, y.shape)
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=42)
code
72116842/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv'
df = pd.read_csv(path, header=None)
df.columns = ['target', 'ids', 'date', 'flag', 'user', 'text']
df.sample(10)
code
72116842/cell_16
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv'
df = pd.read_csv(path, header=None)
df.columns = ['target', 'ids', 'date', 'flag', 'user', 'text']
df.sample(10)
df.shape
df['text'].iloc[0]
code
72116842/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv'
df = pd.read_csv(path, header=None)
df.columns = ['target', 'ids', 'date', 'flag', 'user', 'text']
df.sample(10)
df.shape
df['text'].iloc[1]
code
72116842/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv'
df = pd.read_csv(path, header=None)
df.columns = ['target', 'ids', 'date', 'flag', 'user', 'text']
df.head()
code
72116842/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

path = '/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv'
df = pd.read_csv(path, header=None)
df.columns = ['target', 'ids', 'date', 'flag', 'user', 'text']
df.sample(10)
df.shape
code
2003059/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

data_train = pd.read_csv('../input/train.csv')
data_test = pd.read_csv('../input/test (1).csv')

def simplify_ages(df):
    df.Age = df.Age.fillna(-0.5)
    bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)
    group_names = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
    categories = pd.cut(df.Age, bins, labels=group_names)
    df.Age = categories
    return df

def simplify_cabins(df):
    df.Cabin = df.Cabin.fillna('N')
    df.Cabin = df.Cabin.apply(lambda x: x[0])
    return df

def simplify_fares(df):
    df.Fare = df.Fare.fillna(-0.5)  # numeric fill so pd.cut can bin the column
    bins = (-1, 0, 8, 15, 32, 600)
    group_names = ['Unknown', '1', '2', '3', '4', '5']
    categories = pd.cut(df.Fare, bins, labels=group_names)
    df.Fare = categories
    return df

def format_names(df):
    df['Lastname'] = df.Name.apply(lambda x: x.split(' ')[0])
    df['Prefix'] = df.Name.apply(lambda x: x.split(' ')[1])
    return df

def drop_features(df):
    return df.drop(['Ticket', 'Name', 'Embarked'], axis=1)

def transform_features(df):
    df = simplify_ages(df)
    df = simplify_cabins(df)
    df = simplify_fares(df)
    df = format_names(df)
    df = drop_features(df)
    return df

data_train = transform_features(data_train)
data_test = transform_features(data_test)
data_train.head()
code
2003059/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt

data_train = pd.read_csv('../input/train.csv')
data_test = pd.read_csv('../input/test (1).csv')

sns.barplot(x='Embarked', y='Survived', data=data_train, hue='Pclass')
code
2003059/cell_1
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

data_train = pd.read_csv('../input/train.csv')
data_test = pd.read_csv('../input/test (1).csv')
data_train.head(10)
code
2003059/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt

data_train = pd.read_csv('../input/train.csv')
data_test = pd.read_csv('../input/test (1).csv')

sns.barplot(x='Pclass', y='Survived', data=data_train, hue='Sex')
code
90147643/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd

filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)

def fixvals(val):
    retval = val.replace('$', '')
    retval = retval.replace(',', '')
    return retval

sba = pd.read_csv(filepath + 'SBAnational.csv', converters={'DisbursementGross': fixvals, 'SBA_Appv': fixvals, 'GrAppv': fixvals, 'ChgOffPrinGr': fixvals}, parse_dates=['DisbursementDate'], low_memory=False)
sba = sba.astype({'DisbursementGross': np.float64, 'SBA_Appv': np.float64, 'GrAppv': np.float64, 'ChgOffPrinGr': np.float64, 'NAICS': np.str_})
sba.to_csv(savepath + 'sba_save1.csv', index=False)

cols_to_drop = ['LoanNr_ChkDgt', 'Zip', 'Bank', 'BankState', 'ApprovalDate', 'ApprovalFY', 'ChgOffDate', 'BalanceGross']
sba.drop(columns=cols_to_drop, inplace=True)

x = sba[sba['DisbursementDate'].isna()]
print(x.shape)
code
90147643/cell_25
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)

def fixvals(val):
    retval = val.replace('$', '')
    retval = retval.replace(',', '')
    return retval

sba = pd.read_csv(filepath + 'SBAnational.csv', converters={'DisbursementGross': fixvals, 'SBA_Appv': fixvals, 'GrAppv': fixvals, 'ChgOffPrinGr': fixvals}, parse_dates=['DisbursementDate'], low_memory=False)
sba = sba.astype({'DisbursementGross': np.float64, 'SBA_Appv': np.float64, 'GrAppv': np.float64, 'ChgOffPrinGr': np.float64, 'NAICS': np.str_})
sba.to_csv(savepath + 'sba_save1.csv', index=False)

cols_to_drop = ['LoanNr_ChkDgt', 'Zip', 'Bank', 'BankState', 'ApprovalDate', 'ApprovalFY', 'ChgOffDate', 'BalanceGross']
sba.drop(columns=cols_to_drop, inplace=True)

x = sba[sba['DisbursementDate'].isna()]
sba.dropna(subset=['DisbursementDate'], how='all', inplace=True)
x = sba[sba['DisbursementDate'].isna()]
sba.drop(columns=['RevLineCr'], inplace=True)
'RevLineCr' in sba.columns  # confirm the column is gone
code
90147643/cell_34
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)

def fixvals(val):
    retval = val.replace('$', '')
    retval = retval.replace(',', '')
    return retval

sba = pd.read_csv(filepath + 'SBAnational.csv', converters={'DisbursementGross': fixvals, 'SBA_Appv': fixvals, 'GrAppv': fixvals, 'ChgOffPrinGr': fixvals}, parse_dates=['DisbursementDate'], low_memory=False)
sba = sba.astype({'DisbursementGross': np.float64, 'SBA_Appv': np.float64, 'GrAppv': np.float64, 'ChgOffPrinGr': np.float64, 'NAICS': np.str_})
sba.to_csv(savepath + 'sba_save1.csv', index=False)

cols_to_drop = ['LoanNr_ChkDgt', 'Zip', 'Bank', 'BankState', 'ApprovalDate', 'ApprovalFY', 'ChgOffDate', 'BalanceGross']
sba.drop(columns=cols_to_drop, inplace=True)

x = sba[sba['DisbursementDate'].isna()]
sba.dropna(subset=['DisbursementDate'], how='all', inplace=True)
x = sba[sba['DisbursementDate'].isna()]
sba.drop(columns=['RevLineCr'], inplace=True)
'RevLineCr' in sba.columns

sba = sba[(sba['LowDoc'] == 'Y') | (sba['LowDoc'] == 'N')]
len(sba[(sba['LowDoc'] != 'Y') & (sba['LowDoc'] != 'N')])
len(sba[(sba['NewExist'] != 1) & (sba['NewExist'] != 2)])
code
90147643/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)
sba.head(2)
code
90147643/cell_29
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns

filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)

def fixvals(val):
    retval = val.replace('$', '')
    retval = retval.replace(',', '')
    return retval

sba = pd.read_csv(filepath + 'SBAnational.csv', converters={'DisbursementGross': fixvals, 'SBA_Appv': fixvals, 'GrAppv': fixvals, 'ChgOffPrinGr': fixvals}, parse_dates=['DisbursementDate'], low_memory=False)
sba = sba.astype({'DisbursementGross': np.float64, 'SBA_Appv': np.float64, 'GrAppv': np.float64, 'ChgOffPrinGr': np.float64, 'NAICS': np.str_})

def check_cols_with_nulls(df):
    cols_with_missing = [col for col in df.columns if df[col].isnull().any()]

check_cols_with_nulls(sba)
sba.to_csv(savepath + 'sba_save1.csv', index=False)

cols_to_drop = ['LoanNr_ChkDgt', 'Zip', 'Bank', 'BankState', 'ApprovalDate', 'ApprovalFY', 'ChgOffDate', 'BalanceGross']
sba.drop(columns=cols_to_drop, inplace=True)

x = sba[sba['DisbursementDate'].isna()]
sba.dropna(subset=['DisbursementDate'], how='all', inplace=True)
x = sba[sba['DisbursementDate'].isna()]
sba.drop(columns=['RevLineCr'], inplace=True)
'RevLineCr' in sba.columns

sns.countplot(x='LowDoc', data=sba)
code
90147643/cell_19
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)

def fixvals(val):
    retval = val.replace('$', '')
    retval = retval.replace(',', '')
    return retval

sba = pd.read_csv(filepath + 'SBAnational.csv', converters={'DisbursementGross': fixvals, 'SBA_Appv': fixvals, 'GrAppv': fixvals, 'ChgOffPrinGr': fixvals}, parse_dates=['DisbursementDate'], low_memory=False)
sba = sba.astype({'DisbursementGross': np.float64, 'SBA_Appv': np.float64, 'GrAppv': np.float64, 'ChgOffPrinGr': np.float64, 'NAICS': np.str_})
sba.to_csv(savepath + 'sba_save1.csv', index=False)

cols_to_drop = ['LoanNr_ChkDgt', 'Zip', 'Bank', 'BankState', 'ApprovalDate', 'ApprovalFY', 'ChgOffDate', 'BalanceGross']
sba.drop(columns=cols_to_drop, inplace=True)
sba.head(2)
code
90147643/cell_32
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd

filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)

def fixvals(val):
    retval = val.replace('$', '')
    retval = retval.replace(',', '')
    return retval

sba = pd.read_csv(filepath + 'SBAnational.csv', converters={'DisbursementGross': fixvals, 'SBA_Appv': fixvals, 'GrAppv': fixvals, 'ChgOffPrinGr': fixvals}, parse_dates=['DisbursementDate'], low_memory=False)
sba = sba.astype({'DisbursementGross': np.float64, 'SBA_Appv': np.float64, 'GrAppv': np.float64, 'ChgOffPrinGr': np.float64, 'NAICS': np.str_})
sba.to_csv(savepath + 'sba_save1.csv', index=False)

cols_to_drop = ['LoanNr_ChkDgt', 'Zip', 'Bank', 'BankState', 'ApprovalDate', 'ApprovalFY', 'ChgOffDate', 'BalanceGross']
sba.drop(columns=cols_to_drop, inplace=True)

x = sba[sba['DisbursementDate'].isna()]
sba.dropna(subset=['DisbursementDate'], how='all', inplace=True)
x = sba[sba['DisbursementDate'].isna()]
sba.drop(columns=['RevLineCr'], inplace=True)
'RevLineCr' in sba.columns

sba = sba[(sba['LowDoc'] == 'Y') | (sba['LowDoc'] == 'N')]
len(sba[(sba['LowDoc'] != 'Y') & (sba['LowDoc'] != 'N')])
len(sba[(sba['LowDoc'] == 'Y') | (sba['LowDoc'] == 'N')])
code
90147643/cell_28
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)

def fixvals(val):
    retval = val.replace('$', '')
    retval = retval.replace(',', '')
    return retval

sba = pd.read_csv(filepath + 'SBAnational.csv', converters={'DisbursementGross': fixvals, 'SBA_Appv': fixvals, 'GrAppv': fixvals, 'ChgOffPrinGr': fixvals}, parse_dates=['DisbursementDate'], low_memory=False)
sba = sba.astype({'DisbursementGross': np.float64, 'SBA_Appv': np.float64, 'GrAppv': np.float64, 'ChgOffPrinGr': np.float64, 'NAICS': np.str_})
sba.to_csv(savepath + 'sba_save1.csv', index=False)

cols_to_drop = ['LoanNr_ChkDgt', 'Zip', 'Bank', 'BankState', 'ApprovalDate', 'ApprovalFY', 'ChgOffDate', 'BalanceGross']
sba.drop(columns=cols_to_drop, inplace=True)

x = sba[sba['DisbursementDate'].isna()]
sba.dropna(subset=['DisbursementDate'], how='all', inplace=True)
x = sba[sba['DisbursementDate'].isna()]
sba.drop(columns=['RevLineCr'], inplace=True)
'RevLineCr' in sba.columns

len(sba[(sba['LowDoc'] != 'Y') & (sba['LowDoc'] != 'N')])
code
90147643/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)

def fixvals(val):
    retval = val.replace('$', '')
    retval = retval.replace(',', '')
    return retval

sba = pd.read_csv(filepath + 'SBAnational.csv', converters={'DisbursementGross': fixvals, 'SBA_Appv': fixvals, 'GrAppv': fixvals, 'ChgOffPrinGr': fixvals}, parse_dates=['DisbursementDate'], low_memory=False)
print('Shape of SBA : ', sba.shape)
sba[['DisbursementGross', 'SBA_Appv', 'GrAppv', 'ChgOffPrinGr', 'DisbursementDate']].head(2)
code
90147643/cell_15
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns

filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)

def fixvals(val):
    retval = val.replace('$', '')
    retval = retval.replace(',', '')
    return retval

sba = pd.read_csv(filepath + 'SBAnational.csv', converters={'DisbursementGross': fixvals, 'SBA_Appv': fixvals, 'GrAppv': fixvals, 'ChgOffPrinGr': fixvals}, parse_dates=['DisbursementDate'], low_memory=False)
sba = sba.astype({'DisbursementGross': np.float64, 'SBA_Appv': np.float64, 'GrAppv': np.float64, 'ChgOffPrinGr': np.float64, 'NAICS': np.str_})

def check_cols_with_nulls(df):
    cols_with_missing = [col for col in df.columns if df[col].isnull().any()]
    if len(cols_with_missing) == 0:
        print('No Missing Values')
    else:
        print(cols_with_missing)
        sns.heatmap(df.isnull(), yticklabels=False, cbar=False, cmap='viridis')

check_cols_with_nulls(sba)
code
90147643/cell_35
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)
def fixvals(val):
    retval = val.replace('$', '')
    retval = retval.replace(',', '')
    return retval
sba = pd.read_csv(filepath + 'SBAnational.csv', converters={'DisbursementGross': fixvals, 'SBA_Appv': fixvals, 'GrAppv': fixvals, 'ChgOffPrinGr': fixvals}, parse_dates=['DisbursementDate'], low_memory=False)
sba = sba.astype({'DisbursementGross': np.float64, 'SBA_Appv': np.float64, 'GrAppv': np.float64, 'ChgOffPrinGr': np.float64, 'NAICS': np.str_})
def check_cols_with_nulls(df):
    cols_with_missing = [col for col in df.columns if df[col].isnull().any()]
check_cols_with_nulls(sba)
sba.to_csv(savepath + 'sba_save1.csv', index=False)
cols_to_drop = ['LoanNr_ChkDgt', 'Zip', 'Bank', 'BankState', 'ApprovalDate', 'ApprovalFY', 'ChgOffDate', 'BalanceGross']
sba.drop(columns=cols_to_drop, inplace=True)
x = sba[sba['DisbursementDate'].isna()]
sba.dropna(subset=['DisbursementDate'], how='all', inplace=True)
x = sba[sba['DisbursementDate'].isna()]
sba.drop(columns=['RevLineCr'], inplace=True)
'RevLineCr' in sba.columns
sba = sba[(sba['LowDoc'] == 'Y') | (sba['LowDoc'] == 'N')]
len(sba[(sba['LowDoc'] != 'Y') & (sba['LowDoc'] != 'N')])
sns.countplot(x='NewExist', data=sba)
code
90147643/cell_31
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)
def fixvals(val):
    retval = val.replace('$', '')
    retval = retval.replace(',', '')
    return retval
sba = pd.read_csv(filepath + 'SBAnational.csv', converters={'DisbursementGross': fixvals, 'SBA_Appv': fixvals, 'GrAppv': fixvals, 'ChgOffPrinGr': fixvals}, parse_dates=['DisbursementDate'], low_memory=False)
sba = sba.astype({'DisbursementGross': np.float64, 'SBA_Appv': np.float64, 'GrAppv': np.float64, 'ChgOffPrinGr': np.float64, 'NAICS': np.str_})
sba.to_csv(savepath + 'sba_save1.csv', index=False)
cols_to_drop = ['LoanNr_ChkDgt', 'Zip', 'Bank', 'BankState', 'ApprovalDate', 'ApprovalFY', 'ChgOffDate', 'BalanceGross']
sba.drop(columns=cols_to_drop, inplace=True)
x = sba[sba['DisbursementDate'].isna()]
sba.dropna(subset=['DisbursementDate'], how='all', inplace=True)
x = sba[sba['DisbursementDate'].isna()]
sba.drop(columns=['RevLineCr'], inplace=True)
'RevLineCr' in sba.columns
sba = sba[(sba['LowDoc'] == 'Y') | (sba['LowDoc'] == 'N')]
len(sba[(sba['LowDoc'] != 'Y') & (sba['LowDoc'] != 'N')])
code
90147643/cell_24
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)
def fixvals(val):
    retval = val.replace('$', '')
    retval = retval.replace(',', '')
    return retval
sba = pd.read_csv(filepath + 'SBAnational.csv', converters={'DisbursementGross': fixvals, 'SBA_Appv': fixvals, 'GrAppv': fixvals, 'ChgOffPrinGr': fixvals}, parse_dates=['DisbursementDate'], low_memory=False)
sba = sba.astype({'DisbursementGross': np.float64, 'SBA_Appv': np.float64, 'GrAppv': np.float64, 'ChgOffPrinGr': np.float64, 'NAICS': np.str_})
sba.to_csv(savepath + 'sba_save1.csv', index=False)
cols_to_drop = ['LoanNr_ChkDgt', 'Zip', 'Bank', 'BankState', 'ApprovalDate', 'ApprovalFY', 'ChgOffDate', 'BalanceGross']
sba.drop(columns=cols_to_drop, inplace=True)
x = sba[sba['DisbursementDate'].isna()]
sba.dropna(subset=['DisbursementDate'], how='all', inplace=True)
x = sba[sba['DisbursementDate'].isna()]
len(sba[(sba['RevLineCr'] != 'Y') & (sba['RevLineCr'] != 'N')])
code
90147643/cell_22
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)
def fixvals(val):
    retval = val.replace('$', '')
    retval = retval.replace(',', '')
    return retval
sba = pd.read_csv(filepath + 'SBAnational.csv', converters={'DisbursementGross': fixvals, 'SBA_Appv': fixvals, 'GrAppv': fixvals, 'ChgOffPrinGr': fixvals}, parse_dates=['DisbursementDate'], low_memory=False)
sba = sba.astype({'DisbursementGross': np.float64, 'SBA_Appv': np.float64, 'GrAppv': np.float64, 'ChgOffPrinGr': np.float64, 'NAICS': np.str_})
sba.to_csv(savepath + 'sba_save1.csv', index=False)
cols_to_drop = ['LoanNr_ChkDgt', 'Zip', 'Bank', 'BankState', 'ApprovalDate', 'ApprovalFY', 'ChgOffDate', 'BalanceGross']
sba.drop(columns=cols_to_drop, inplace=True)
x = sba[sba['DisbursementDate'].isna()]
sba.dropna(subset=['DisbursementDate'], how='all', inplace=True)
x = sba[sba['DisbursementDate'].isna()]
print(x.shape)
code
90147643/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)
def fixvals(val):
    retval = val.replace('$', '')
    retval = retval.replace(',', '')
    return retval
sba = pd.read_csv(filepath + 'SBAnational.csv', converters={'DisbursementGross': fixvals, 'SBA_Appv': fixvals, 'GrAppv': fixvals, 'ChgOffPrinGr': fixvals}, parse_dates=['DisbursementDate'], low_memory=False)
print(sba.columns)
print()
print(sba.info())
code
90147643/cell_27
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)
def fixvals(val):
    retval = val.replace('$', '')
    retval = retval.replace(',', '')
    return retval
sba = pd.read_csv(filepath + 'SBAnational.csv', converters={'DisbursementGross': fixvals, 'SBA_Appv': fixvals, 'GrAppv': fixvals, 'ChgOffPrinGr': fixvals}, parse_dates=['DisbursementDate'], low_memory=False)
sba = sba.astype({'DisbursementGross': np.float64, 'SBA_Appv': np.float64, 'GrAppv': np.float64, 'ChgOffPrinGr': np.float64, 'NAICS': np.str_})
sba.to_csv(savepath + 'sba_save1.csv', index=False)
cols_to_drop = ['LoanNr_ChkDgt', 'Zip', 'Bank', 'BankState', 'ApprovalDate', 'ApprovalFY', 'ChgOffDate', 'BalanceGross']
sba.drop(columns=cols_to_drop, inplace=True)
x = sba[sba['DisbursementDate'].isna()]
sba.dropna(subset=['DisbursementDate'], how='all', inplace=True)
x = sba[sba['DisbursementDate'].isna()]
sba.drop(columns=['RevLineCr'], inplace=True)
'RevLineCr' in sba.columns
sba['LowDoc'].isna().sum()
code
90147643/cell_36
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
filepath = '../input/should-this-loan-be-approved-or-denied/'
savepath = './'
sba = pd.read_csv(filepath + 'SBAnational.csv', low_memory=False)
def fixvals(val):
    retval = val.replace('$', '')
    retval = retval.replace(',', '')
    return retval
sba = pd.read_csv(filepath + 'SBAnational.csv', converters={'DisbursementGross': fixvals, 'SBA_Appv': fixvals, 'GrAppv': fixvals, 'ChgOffPrinGr': fixvals}, parse_dates=['DisbursementDate'], low_memory=False)
sba = sba.astype({'DisbursementGross': np.float64, 'SBA_Appv': np.float64, 'GrAppv': np.float64, 'ChgOffPrinGr': np.float64, 'NAICS': np.str_})
sba.to_csv(savepath + 'sba_save1.csv', index=False)
cols_to_drop = ['LoanNr_ChkDgt', 'Zip', 'Bank', 'BankState', 'ApprovalDate', 'ApprovalFY', 'ChgOffDate', 'BalanceGross']
sba.drop(columns=cols_to_drop, inplace=True)
x = sba[sba['DisbursementDate'].isna()]
sba.dropna(subset=['DisbursementDate'], how='all', inplace=True)
x = sba[sba['DisbursementDate'].isna()]
sba.drop(columns=['RevLineCr'], inplace=True)
'RevLineCr' in sba.columns
sba = sba[(sba['LowDoc'] == 'Y') | (sba['LowDoc'] == 'N')]
len(sba[(sba['LowDoc'] != 'Y') & (sba['LowDoc'] != 'N')])
sba = sba[(sba['NewExist'] == 1) | (sba['NewExist'] == 2)]
len(sba[(sba['NewExist'] != 1) & (sba['NewExist'] != 2)])
code
90153696/cell_21
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd
df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
df = df.drop(columns=['Neck', 'Chest', 'Hip'])
X = df[['BodyFat', 'Age']]
y = df['Density']
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
model.coef_
model.predict([[6000, 3]])
model.predict([[10000, 3]])
model.predict([[6000, 4]])
code
90153696/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
df = df.drop(columns=['Neck', 'Chest', 'Hip'])
X = df[['BodyFat', 'Age']]
y = df['Density']
X.head()
code
90153696/cell_9
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
df = df.drop(columns=['Neck', 'Chest', 'Hip'])
df.head()
code
90153696/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
sns.lmplot(x='BodyFat', y='Age', data=df, ci=None)
code
90153696/cell_23
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd
df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
df = df.drop(columns=['Neck', 'Chest', 'Hip'])
X = df[['BodyFat', 'Age']]
y = df['Density']
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
model.coef_
model.predict([[6000, 3]])
model.predict([[10000, 3]])
model.predict([[6000, 4]])
y_hat = model.predict(X)
y_hat
dc = pd.concat([df[0:].reset_index(), pd.Series(y_hat, name='predicted')], axis='columns')
dc
code
90153696/cell_20
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd
df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
df = df.drop(columns=['Neck', 'Chest', 'Hip'])
X = df[['BodyFat', 'Age']]
y = df['Density']
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
model.coef_
model.predict([[6000, 3]])
model.predict([[10000, 3]])
code
90153696/cell_6
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
sns.kdeplot(x='Weight', data=df)
code
90153696/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
df = df.drop(columns=['Neck', 'Chest', 'Hip'])
len(df)
code
90153696/cell_19
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd
df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
df = df.drop(columns=['Neck', 'Chest', 'Hip'])
X = df[['BodyFat', 'Age']]
y = df['Density']
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
model.coef_
model.predict([[6000, 3]])
code
90153696/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
sns.kdeplot(x='BodyFat', data=df)
code
90153696/cell_18
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd
df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
df = df.drop(columns=['Neck', 'Chest', 'Hip'])
X = df[['BodyFat', 'Age']]
y = df['Density']
model = LinearRegression()
model.fit(X, y)
model.score(X, y)
model.intercept_
model.coef_
code
90153696/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
df.info()
code
90153696/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd
df = pd.read_csv('../input/body-fat-prediction-dataset/bodyfat.csv')
df = df.drop(columns=['Neck', 'Chest', 'Hip'])
X = df[['BodyFat', 'Age']]
y = df['Density']
model = LinearRegression()
model.fit(X, y)
code