Dataset schema:
  path             : string   (length 13–17)
  screenshot_names : sequence (length 1–873)
  code             : string   (length 0–40.4k)
  cell_type        : string   (1 class: "code")
89125628/cell_51
[ "image_output_1.png" ]
import pandas as pd
from sklearn.preprocessing import StandardScaler

details = {'col1': [1, 3, 5, 7, 9], 'col2': [7, 4, 35, 14, 56]}
df = pd.DataFrame(details)

# Standardize each column to zero mean and unit variance.
scaler = StandardScaler()
df = scaler.fit_transform(df)
df = pd.DataFrame(df)

# plot.bar() returns an Axes; naming it `plt` would shadow matplotlib.pyplot.
ax = df.plot.bar()
code
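One caveat worth noting beside this cell: fit_transform returns a bare NumPy array, so the original column names are lost. A minimal sketch that keeps them (assuming df is the raw details frame before scaling):

scaled = pd.DataFrame(StandardScaler().fit_transform(df),
                      columns=df.columns, index=df.index)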
89125628/cell_59
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import StandardScaler

# Standard scaling example (same toy frame as the previous cell).
details = {'col1': [1, 3, 5, 7, 9], 'col2': [7, 4, 35, 14, 56]}
df = pd.DataFrame(details)
scaler = StandardScaler()
df = pd.DataFrame(scaler.fit_transform(df))
ax = df.plot.bar()

# A second frame whose columns sit on very different scales.
df = pd.DataFrame([[180000, 110, 18.9, 1400],
                   [360000, 905, 23.4, 1800],
                   [230000, 230, 14.0, 1300],
                   [60000, 450, 13.5, 1500]],
                  columns=['Col A', 'Col B', 'Col C', 'Col D'])
ax = df.plot.bar()

# Maximum-absolute scaling: divide each column by its largest |value|.
df_max_scaled = df.copy()
for column in df_max_scaled.columns:
    df_max_scaled[column] = df_max_scaled[column] / df_max_scaled[column].abs().max()
ax = df_max_scaled.plot.bar()
code
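The hand-rolled max-abs loop above matches scikit-learn's MaxAbsScaler; a sketch of the same scaling with that class, assuming the four-column df from the cell:

from sklearn.preprocessing import MaxAbsScaler

df_max_scaled = pd.DataFrame(MaxAbsScaler().fit_transform(df), columns=df.columns)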
89125628/cell_58
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import StandardScaler

details = {'col1': [1, 3, 5, 7, 9], 'col2': [7, 4, 35, 14, 56]}
df = pd.DataFrame(details)
scaler = StandardScaler()
df = pd.DataFrame(scaler.fit_transform(df))
ax = df.plot.bar()

df = pd.DataFrame([[180000, 110, 18.9, 1400],
                   [360000, 905, 23.4, 1800],
                   [230000, 230, 14.0, 1300],
                   [60000, 450, 13.5, 1500]],
                  columns=['Col A', 'Col B', 'Col C', 'Col D'])
ax = df.plot.bar()

# Max-abs scale each column, then show the scaled frame.
df_max_scaled = df.copy()
for column in df_max_scaled.columns:
    df_max_scaled[column] = df_max_scaled[column] / df_max_scaled[column].abs().max()
display(df_max_scaled)
code
129007116/cell_42
[ "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
x = data.YearsExperience
y = data.Salary
w = 9909
b = 21641.1797

def linearRegression(x, w, b):
    pre_y = []
    m = len(x)
    for i in range(m):
        f_wb = w * x[i] + b
        pre_y.append(f_wb)
    return pre_y

y_pred = linearRegression(x, w, b)

def SquaredErrorCost(y_pred, y):
    totalCost = 0
    m = len(y_pred)
    for i in range(m):
        cost = (y_pred[i] - y[i]) ** 2
        totalCost += cost
    totalCost /= 2 * m
    return totalCost

def compute_cost(x, y, w, b):
    m = x.shape[0]
    cost = 0
    for i in range(m):
        f_wb = w * x[i] + b
        cost = cost + (f_wb - y[i]) ** 2
    total_cost = 1 / (2 * m) * cost
    return total_cost

cost = SquaredErrorCost(y_pred, y)
print(' Squared Error Cost :', cost)
print(f' Squared Error Cost : {cost:10}')
print('Squared Error Cost: {:.5e}'.format(cost))
code
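Both cost helpers in this cell compute the same quantity, the halved mean squared error over the m training points; written out, with f_{w,b} the linear model used throughout the notebook:

J(w, b) = \frac{1}{2m} \sum_{i=1}^{m} \left( f_{w,b}(x^{(i)}) - y^{(i)} \right)^2, \qquad f_{w,b}(x) = w x + b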
129007116/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
print('Duplicate Values =', data.duplicated().sum())
code
129007116/cell_30
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
w = 9909
b = 21641.1797
print('w :', w)
print('b :', b)
code
129007116/cell_29
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
x = data.YearsExperience
y = data.Salary
plt.title('Salary Data')
plt.scatter(x, y, marker='x', c='r')
plt.xlabel('Years of Experience')
plt.ylabel('Salary (per year)')
plt.show()
code
129007116/cell_26
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
x = data.YearsExperience
y = data.Salary
print('x_train data is')
x
code
129007116/cell_48
[ "text_plain_output_1.png" ]
import math
import pandas as pd

data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
x = data.YearsExperience
y = data.Salary
w = 9909
b = 21641.1797

def linearRegression(x, w, b):
    pre_y = []
    m = len(x)
    for i in range(m):
        f_wb = w * x[i] + b
        pre_y.append(f_wb)
    return pre_y

y_pred = linearRegression(x, w, b)

def SquaredErrorCost(y_pred, y):
    totalCost = 0
    m = len(y_pred)
    for i in range(m):
        cost = (y_pred[i] - y[i]) ** 2
        totalCost += cost
    totalCost /= 2 * m
    return totalCost

def compute_cost(x, y, w, b):
    m = x.shape[0]
    cost = 0
    for i in range(m):
        f_wb = w * x[i] + b
        cost = cost + (f_wb - y[i]) ** 2
    total_cost = 1 / (2 * m) * cost
    return total_cost

def compute_gradient(x, y, w, b):
    m = len(x)
    # Accumulators must start at zero; seeding them with the current w and b
    # (as the notebook originally did) skews the gradient by w/m and b/m.
    dj_dw = 0
    dj_db = 0
    for i in range(m):
        f_wb = w * x[i] + b
        dj_dw_i = (f_wb - y[i]) * x[i]
        dj_db_i = f_wb - y[i]
        dj_dw += dj_dw_i
        dj_db += dj_db_i
    dj_dw /= m
    dj_db /= m
    return (dj_dw, dj_db)

def gradient_descent(x, y, w_in, b_in, alpha, num_iters, cost_function, gradient_function):
    """
    Performs gradient descent to fit w,b. Updates w,b by taking
    num_iters gradient steps with learning rate alpha.

    Args:
      x (ndarray (m,)): Data, m examples
      y (ndarray (m,)): target values
      w_in, b_in (scalar): initial values of model parameters
      alpha (float): learning rate
      num_iters (int): number of iterations to run gradient descent
      cost_function: function to call to produce cost
      gradient_function: function to call to produce gradient

    Returns:
      w (scalar): updated value of parameter after running gradient descent
      b (scalar): updated value of parameter after running gradient descent
      J_history (list): history of cost values
      p_history (list): history of parameters [w, b]
    """
    J_history = []
    p_history = []
    b = b_in
    w = w_in
    for i in range(num_iters):
        dj_dw, dj_db = gradient_function(x, y, w, b)
        b = b - alpha * dj_db
        w = w - alpha * dj_dw
        if i < 100000:  # cap the stored history to bound memory use
            J_history.append(cost_function(x, y, w, b))
            p_history.append([w, b])
    return (w, b, J_history, p_history)

w_init = 0
b_init = 0
iterations = 10000
tmp_alpha = 0.01
w_final, b_final, J_hist, p_hist = gradient_descent(x, y, w_init, b_init, tmp_alpha,
                                                    iterations, compute_cost, compute_gradient)
print(f'(w,b) found by gradient descent: ({w_final:8.4f},{b_final:8.4f})')
code
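The per-example loops in compute_cost and compute_gradient can be collapsed with NumPy broadcasting; a minimal vectorized sketch, assuming x and y are the pandas Series defined earlier:

import numpy as np

def compute_gradient_vectorized(x, y, w, b):
    # Gradient of the halved-MSE cost with respect to w and b.
    err = w * np.asarray(x) + b - np.asarray(y)
    return (err * np.asarray(x)).mean(), err.mean()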
129007116/cell_41
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
x = data.YearsExperience
y = data.Salary
w = 9909
b = 21641.1797

def linearRegression(x, w, b):
    pre_y = []
    m = len(x)
    for i in range(m):
        f_wb = w * x[i] + b
        pre_y.append(f_wb)
    return pre_y

y_pred = linearRegression(x, w, b)
data = {'y': y, 'y_pred': y_pred, 'error': abs(y_pred - y)}
df = pd.DataFrame(data)
print(df)
code
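Since x is a pandas Series, the loop inside linearRegression reduces to broadcast arithmetic; an equivalent sketch:

y_pred = w * x + b           # Series broadcasting; same values as linearRegression(x, w, b)
error = (y_pred - y).abs()   # the 'error' column assembled above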
129007116/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.head()
code
129007116/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
code
129007116/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.tail()
code
129007116/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
code
129007116/cell_14
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
print('data shape :', data.shape)
code
129007116/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
print(data.info())
code
129007116/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
x = data.YearsExperience
y = data.Salary
print('y_train data is')
y
code
129007116/cell_36
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
x = data.YearsExperience
y = data.Salary
w = 9909
b = 21641.1797

def linearRegression(x, w, b):
    pre_y = []
    m = len(x)
    for i in range(m):
        f_wb = w * x[i] + b
        pre_y.append(f_wb)
    return pre_y

y_pred = linearRegression(x, w, b)
plt.plot(x, y_pred, c='b', label='Our Prediction')
plt.scatter(x, y, marker='x', c='r', label='Actual Values')
# Labels match the salary data being plotted (the original cell reused
# titles copied from a housing-prices example).
plt.title('Salary vs. Years of Experience')
plt.ylabel('Salary (per year)')
plt.xlabel('Years of Experience')
plt.legend()
plt.show()
code
2041173/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import warnings

import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import MaxAbsScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss

warnings.filterwarnings('ignore')
print(check_output(['ls', '../input']).decode('utf8'))
code
2041173/cell_7
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.preprocessing import MaxAbsScaler
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
subm = pd.read_csv('../input/sample_submission.csv')

# Fit the vectorizer on train and test text together.
df = pd.concat([train['comment_text'], test['comment_text']], axis=0)
df = df.fillna('unknown')
nrow_train = train.shape[0]
vectorizer = TfidfVectorizer(stop_words='english', max_features=50000)
data = vectorizer.fit_transform(df)
X = MaxAbsScaler().fit_transform(data)

col = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
preds = np.zeros((test.shape[0], len(col)))
loss = []
# One binary logistic regression per label (one-vs-rest over the six labels).
for i, j in enumerate(col):
    print('===Fit ' + j)
    model = LogisticRegression()
    model.fit(X[:nrow_train], train[j])
    preds[:, i] = model.predict_proba(X[nrow_train:])[:, 1]
    pred_train = model.predict_proba(X[:nrow_train])[:, 1]
    print('log loss:', log_loss(train[j], pred_train))
    loss.append(log_loss(train[j], pred_train))
print('mean column-wise log loss:', np.mean(loss))

submid = pd.DataFrame({'id': subm['id']})
submission = pd.concat([submid, pd.DataFrame(preds, columns=col)], axis=1)
code
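The cell assembles submission but never writes it out; presumably a later cell, not captured here, does. The usual Kaggle step would be something like the following, the file name being an assumption:

submission.to_csv('submission.csv', index=False)  # hypothetical output path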
2041173/cell_8
[ "text_html_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.preprocessing import MaxAbsScaler
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
subm = pd.read_csv('../input/sample_submission.csv')
df = pd.concat([train['comment_text'], test['comment_text']], axis=0)
df = df.fillna('unknown')
nrow_train = train.shape[0]
vectorizer = TfidfVectorizer(stop_words='english', max_features=50000)
data = vectorizer.fit_transform(df)
X = MaxAbsScaler().fit_transform(data)
col = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
preds = np.zeros((test.shape[0], len(col)))
loss = []
for i, j in enumerate(col):
    model = LogisticRegression()
    model.fit(X[:nrow_train], train[j])
    preds[:, i] = model.predict_proba(X[nrow_train:])[:, 1]
    pred_train = model.predict_proba(X[:nrow_train])[:, 1]
    loss.append(log_loss(train[j], pred_train))
submid = pd.DataFrame({'id': subm['id']})
submission = pd.concat([submid, pd.DataFrame(preds, columns=col)], axis=1)
submission.head()
code
2041173/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test[0:10]
code
105203676/cell_21
[ "text_html_output_1.png" ]
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils.np_utils import to_categorical
import numpy as np
import pandas as pd

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))

# Map each category name to an integer label, then one-hot encode.
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
labels = to_categorical(concated['LABEL'], num_classes=5)
if 'Category' in concated.keys():
    concated = concated.drop(['Category'], axis=1)  # drop() is not in-place; reassign

n_most_common_words = 8000
max_len = 130
tokenizer = Tokenizer(num_words=n_most_common_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(concated['Text'].values)
sequences = tokenizer.texts_to_sequences(concated['Text'].values)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
X = pad_sequences(sequences, maxlen=max_len)
code
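As a quick illustration of what the tokenizer step produces, here is a toy run (made-up sentences, not the BBC data); pad_sequences left-pads with zeros by default:

from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

tok = Tokenizer(num_words=100)
tok.fit_on_texts(['the cat sat', 'the dog sat down'])
print(pad_sequences(tok.texts_to_sequences(['the cat sat down']), maxlen=6))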
105203676/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
df.describe(include='object')
code
105203676/cell_25
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from keras.utils.np_utils import to_categorical
import numpy as np
import pandas as pd

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
labels = to_categorical(concated['LABEL'], num_classes=5)
if 'Category' in concated.keys():
    concated = concated.drop(['Category'], axis=1)

epochs = 50
emb_dim = 128
batch_size = 64
labels[:2]
code
105203676/cell_33
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.callbacks import EarlyStopping
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils.np_utils import to_categorical
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
plt.rcParams['figure.figsize'] = (10, 4)
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
labels = to_categorical(concated['LABEL'], num_classes=5)
if 'Category' in concated.keys():
    concated = concated.drop(['Category'], axis=1)

n_most_common_words = 8000
max_len = 130
tokenizer = Tokenizer(num_words=n_most_common_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(concated['Text'].values)
sequences = tokenizer.texts_to_sequences(concated['Text'].values)
word_index = tokenizer.word_index
X = pad_sequences(sequences, maxlen=max_len)

epochs = 50
emb_dim = 128
batch_size = 64
labels[:2]

model = Sequential()
model.add(Embedding(n_most_common_words, emb_dim, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.7))
model.add(LSTM(64, dropout=0.7, recurrent_dropout=0.7))
model.add(Dense(5, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
# X_train / y_train come from a train/test split cell not captured in this dump.
history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.2,
                    callbacks=[EarlyStopping(monitor='val_loss', patience=7, min_delta=0.01)])

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)  # rebinds `epochs` to the x-axis range
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
code
105203676/cell_20
[ "text_html_output_1.png" ]
from keras.utils.np_utils import to_categorical
import numpy as np
import pandas as pd

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
print(concated['LABEL'][:10])
labels = to_categorical(concated['LABEL'], num_classes=5)
print(labels[:10])
if 'Category' in concated.keys():
    concated = concated.drop(['Category'], axis=1)
code
105203676/cell_29
[ "text_plain_output_1.png" ]
from keras.callbacks import EarlyStopping
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils.np_utils import to_categorical
import numpy as np
import pandas as pd

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
labels = to_categorical(concated['LABEL'], num_classes=5)
if 'Category' in concated.keys():
    concated = concated.drop(['Category'], axis=1)

n_most_common_words = 8000
max_len = 130
tokenizer = Tokenizer(num_words=n_most_common_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(concated['Text'].values)
sequences = tokenizer.texts_to_sequences(concated['Text'].values)
word_index = tokenizer.word_index
X = pad_sequences(sequences, maxlen=max_len)

epochs = 50
emb_dim = 128
batch_size = 64
labels[:2]

model = Sequential()
model.add(Embedding(n_most_common_words, emb_dim, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.7))
model.add(LSTM(64, dropout=0.7, recurrent_dropout=0.7))
model.add(Dense(5, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
# X_train / y_train come from a train/test split cell not captured in this dump.
history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.2,
                    callbacks=[EarlyStopping(monitor='val_loss', patience=7, min_delta=0.01)])
code
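Several cells in this notebook consume X_train and y_train without defining them; the split cell evidently did not make it into this dump. A plausible reconstruction, purely an assumption, using scikit-learn's usual idiom:

from sklearn.model_selection import train_test_split

# Hypothetical: split the padded sequences and one-hot labels built above.
X_train, X_test, y_train, y_test = train_test_split(X, labels, test_size=0.25, random_state=42)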
105203676/cell_26
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils.np_utils import to_categorical
import numpy as np
import pandas as pd

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
labels = to_categorical(concated['LABEL'], num_classes=5)
if 'Category' in concated.keys():
    concated = concated.drop(['Category'], axis=1)

n_most_common_words = 8000
max_len = 130
tokenizer = Tokenizer(num_words=n_most_common_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(concated['Text'].values)
sequences = tokenizer.texts_to_sequences(concated['Text'].values)
word_index = tokenizer.word_index
X = pad_sequences(sequences, maxlen=max_len)

epochs = 50
emb_dim = 128
batch_size = 64
labels[:2]

model = Sequential()
model.add(Embedding(n_most_common_words, emb_dim, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.7))
model.add(LSTM(64, dropout=0.7, recurrent_dropout=0.7))
model.add(Dense(5, activation='softmax'))
code
105203676/cell_11
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
# 'length' is not in the raw CSV; it is presumably engineered in a cell
# not captured in this dump.
plt.hist(x=df['length'])
code
105203676/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105203676/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
df.info()
code
105203676/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
df['Category'].value_counts()
code
105203676/cell_28
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils.np_utils import to_categorical
import numpy as np
import pandas as pd

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
labels = to_categorical(concated['LABEL'], num_classes=5)
if 'Category' in concated.keys():
    concated = concated.drop(['Category'], axis=1)

n_most_common_words = 8000
max_len = 130
tokenizer = Tokenizer(num_words=n_most_common_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(concated['Text'].values)
sequences = tokenizer.texts_to_sequences(concated['Text'].values)
word_index = tokenizer.word_index
X = pad_sequences(sequences, maxlen=max_len)

epochs = 50
emb_dim = 128
batch_size = 64
labels[:2]

model = Sequential()
model.add(Embedding(n_most_common_words, emb_dim, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.7))
model.add(LSTM(64, dropout=0.7, recurrent_dropout=0.7))
model.add(Dense(5, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
print(model.summary())
code
105203676/cell_8
[ "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
print(df['Category'].value_counts())
sns.countplot(data=df, x='Category', palette='RdBu')
plt.title('The Distribution of Category')
code
105203676/cell_15
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
# 'length', 'polarity', and 'subjectivity' are engineered columns,
# presumably added in a cell not captured in this dump.
df[['length', 'polarity', 'subjectivity']]
code
105203676/cell_16
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
plt.rcParams['figure.figsize'] = (10, 4)
plt.subplot(1, 2, 1)
sns.distplot(df['polarity'])
plt.subplot(1, 2, 2)
sns.distplot(df['subjectivity'])
plt.suptitle('Distribution of Polarity and Subjectivity')
plt.show()
code
105203676/cell_35
[ "text_plain_output_1.png" ]
from keras.callbacks import EarlyStopping
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils.np_utils import to_categorical
import numpy as np
import pandas as pd

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
labels = to_categorical(concated['LABEL'], num_classes=5)
if 'Category' in concated.keys():
    concated = concated.drop(['Category'], axis=1)

n_most_common_words = 8000
max_len = 130
tokenizer = Tokenizer(num_words=n_most_common_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(concated['Text'].values)
sequences = tokenizer.texts_to_sequences(concated['Text'].values)
word_index = tokenizer.word_index
X = pad_sequences(sequences, maxlen=max_len)

epochs = 50
emb_dim = 128
batch_size = 64
labels[:2]

model = Sequential()
model.add(Embedding(n_most_common_words, emb_dim, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.7))
model.add(LSTM(64, dropout=0.7, recurrent_dropout=0.7))
model.add(Dense(5, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
# X_train / y_train (and X_test / y_test below) come from a split cell not shown here.
history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.2,
                    callbacks=[EarlyStopping(monitor='val_loss', patience=7, min_delta=0.01)])

accr = model.evaluate(X_test, y_test)
txt = ['Rafael Nadal extended his Grand Slam winning streak to 19 matches with a 3-6, 7-5, 3-6, 7-5, 7-6 (10-4) victory over Taylor Fritz']
seq = tokenizer.texts_to_sequences(txt)
padded = pad_sequences(seq, maxlen=max_len)
pred = model.predict(padded)
labels = ['sport', 'business', 'politics', 'entertainment', 'tech']  # rebinds `labels` to class names
print(pred, labels[np.argmax(pred)])
code
105203676/cell_31
[ "text_plain_output_1.png" ]
from keras.callbacks import EarlyStopping
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils.np_utils import to_categorical
import numpy as np
import pandas as pd

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
labels = to_categorical(concated['LABEL'], num_classes=5)
if 'Category' in concated.keys():
    concated = concated.drop(['Category'], axis=1)

n_most_common_words = 8000
max_len = 130
tokenizer = Tokenizer(num_words=n_most_common_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(concated['Text'].values)
sequences = tokenizer.texts_to_sequences(concated['Text'].values)
word_index = tokenizer.word_index
X = pad_sequences(sequences, maxlen=max_len)

epochs = 50
emb_dim = 128
batch_size = 64
labels[:2]

model = Sequential()
model.add(Embedding(n_most_common_words, emb_dim, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.7))
model.add(LSTM(64, dropout=0.7, recurrent_dropout=0.7))
model.add(Dense(5, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
# X_train / y_train (and X_test / y_test below) come from a split cell not shown here.
history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.2,
                    callbacks=[EarlyStopping(monitor='val_loss', patience=7, min_delta=0.01)])
accr = model.evaluate(X_test, y_test)
print('Test set\n Loss: {:0.3f}\n Accuracy: {:0.3f}'.format(accr[0], accr[1]))
code
105203676/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
df[['length', 'polarity', 'Text']]
code
105203676/cell_5
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
print('Shape of Data', df.shape)
df.head(10)
code
32074163/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

# Re-read with low_memory=False and parsed sale dates (supersedes the plain read).
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv',
                 low_memory=False, parse_dates=['saledate'])
df.info()
code
32074163/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv',
                 low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df_tmp = df.copy()

# Expand the sale date into separate calendar features.
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayofweek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayofyear'] = df_tmp.saledate.dt.dayofyear
df_tmp.drop('saledate', axis=1, inplace=True)

df_tmp.state.value_counts()
df_tmp.isnull().sum()
pd.api.types.is_string_dtype(df_tmp['UsageBand'])
df_tmp.UsageBand
# List every column that still holds raw strings.
for label, content in df_tmp.items():
    if pd.api.types.is_string_dtype(content):
        print(label)
code
32074163/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df.head()
code
32074163/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv',
                 low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df_tmp = df.copy()
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayofweek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayofyear'] = df_tmp.saledate.dt.dayofyear
df_tmp.drop('saledate', axis=1, inplace=True)
df_tmp.state.value_counts()
df_tmp.isnull().sum()
df_tmp.info()
code
32074163/cell_20
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv',
                 low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df_tmp = df.copy()
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayofweek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayofyear'] = df_tmp.saledate.dt.dayofyear
df_tmp.drop('saledate', axis=1, inplace=True)
df_tmp.state.value_counts()
df_tmp.isnull().sum()
code
32074163/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df.SalePrice.plot.hist()
code
32074163/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
code
32074163/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv',
                 low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df_tmp = df.copy()
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayofweek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayofyear'] = df_tmp.saledate.dt.dayofyear
df_tmp.drop('saledate', axis=1, inplace=True)
df_tmp.state.value_counts()
df_tmp.info()
code
32074163/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32074163/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv',
                 low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df_tmp = df.copy()
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayofweek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayofyear'] = df_tmp.saledate.dt.dayofyear
df_tmp.drop('saledate', axis=1, inplace=True)
df_tmp.state.value_counts()
code
32074163/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df.info()
code
32074163/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv',
                 low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df_tmp = df.copy()
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayofweek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayofyear'] = df_tmp.saledate.dt.dayofyear
df_tmp.drop('saledate', axis=1, inplace=True)
df_tmp.state.value_counts()
df_tmp.isnull().sum()
pd.api.types.is_string_dtype(df_tmp['UsageBand'])
df_tmp.UsageBand
code
32074163/cell_14
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv',
                 low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df.saledate.head(20)
code
32074163/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv',
                 low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df_tmp = df.copy()
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayofweek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayofyear'] = df_tmp.saledate.dt.dayofyear
df_tmp.drop('saledate', axis=1, inplace=True)
df_tmp.state.value_counts()
df_tmp.isnull().sum()
pd.api.types.is_string_dtype(df_tmp['UsageBand'])
code
32074163/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv',
                 low_memory=False, parse_dates=['saledate'])
df.head()
code
32074163/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv',
                 low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df_tmp = df.copy()
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayofweek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayofyear'] = df_tmp.saledate.dt.dayofyear
df_tmp.drop('saledate', axis=1, inplace=True)
df_tmp.state.value_counts()
df_tmp.isnull().sum()
pd.api.types.is_string_dtype(df_tmp['UsageBand'])
df_tmp.UsageBand
# Turn every string column into an ordered pandas categorical.
for label, content in df_tmp.items():
    if pd.api.types.is_string_dtype(content):
        df_tmp[label] = content.astype('category').cat.as_ordered()
df_tmp.info()
code
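A natural follow-up to this cell (the step the bulldozers notebooks usually take next, though it is not shown in this dump) is to replace each categorical column with integer codes, reserving 0 for missing values:

for label, content in df_tmp.items():
    if not pd.api.types.is_numeric_dtype(content):
        # Categorical codes use -1 for missing, so shift everything up by one.
        df_tmp[label] = pd.Categorical(content).codes + 1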
32074163/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv',
                 low_memory=False, parse_dates=['saledate'])
df.saledate.head(20)
code
32074163/cell_5
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
fig, ax = plt.subplots()
ax.scatter(df['saledate'][:1000], df['SalePrice'][:1000])
code
1005908/cell_25
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re

train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))

def re_search(key):
    """Present all features with specific re pattern"""
    result = []
    my_reg = '' + key
    for item in uniq_feature_total:
        if re.compile(my_reg, re.IGNORECASE).search(item) != None:
            result.append(item)
    return result

re_search('no\\s*\\w*\\s*fee')
code
1005908/cell_23
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re

train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))

def re_search(key):
    """Present all features with specific re pattern"""
    result = []
    my_reg = '' + key
    for item in uniq_feature_total:
        if re.compile(my_reg, re.IGNORECASE).search(item) != None:
            result.append(item)
    return result

re_search('doorman')
code
1005908/cell_33
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re

train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))

def re_search(key):
    """Present all features with specific re pattern"""
    result = []
    my_reg = '' + key
    for item in uniq_feature_total:
        if re.compile(my_reg, re.IGNORECASE).search(item) != None:
            result.append(item)
    return result

def add_feature(row):
    # Set a 1/0 indicator column per regex match; same patterns and
    # behavior as the original if/else chain, written as one loop.
    patterns = {'hardwood': 'hardwood',
                'doorman': 'doorman',
                'no_fee': 'no\\w*fee',
                'reduce_fee': 'reduce|low\\wfee',
                'laundry': 'laundry',
                'war': 'war\\Z|war\\s|war_',
                'gym': 'fitness|gym'}
    for col, pat in patterns.items():
        row[col] = 1 if re.search(pat, row['features_new'], re.IGNORECASE) else 0
    return row

train = train.apply(add_feature, axis=1)
train[['hardwood', 'doorman', 'no_fee', 'reduce_fee', 'laundry', 'war', 'gym']].apply(sum)
code
1005908/cell_6
[ "text_plain_output_1.png" ]
import itertools as it
import pandas as pd

train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
code
1005908/cell_29
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re

train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))

def re_search(key):
    """Present all features with specific re pattern"""
    result = []
    my_reg = '' + key
    for item in uniq_feature_total:
        if re.compile(my_reg, re.IGNORECASE).search(item) != None:
            result.append(item)
    return result

re_search('fitness|gym')
code
1005908/cell_26
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re

train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))

def re_search(key):
    """Present all features with specific re pattern"""
    result = []
    my_reg = '' + key
    for item in uniq_feature_total:
        if re.compile(my_reg, re.IGNORECASE).search(item) != None:
            result.append(item)
    return result

re_search('reduce|low\\sfee')
code
1005908/cell_7
[ "text_plain_output_1.png" ]
import itertools as it
import pandas as pd

train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
list(uniq_feature_total)[:10]
code
1005908/cell_28
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re

train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))

def re_search(key):
    """Present all features with specific re pattern"""
    result = []
    my_reg = '' + key
    for item in uniq_feature_total:
        if re.compile(my_reg, re.IGNORECASE).search(item) != None:
            result.append(item)
    return result

re_search('war\\Z|war\\s')
code
1005908/cell_17
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import pandas as pd

train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
target_num_map = {'high': 0, 'medium': 1, 'low': 2}
features = tr_sparse.toarray()
labels = train['interest_level'].apply(lambda x: target_num_map[x]).as_matrix()
clf = DecisionTreeClassifier(max_depth=5)
cv = StratifiedShuffleSplit(n_splits=3, test_size=0.3)
for train_idx, test_idx in cv.split(features, labels):
    features_train, labels_train = (features[train_idx], labels[train_idx])
    features_test, labels_test = (features[test_idx], labels[test_idx])
    clf.fit(features_train, labels_train)
    print('log loss:', -1 * round(log_loss(labels_test, clf.predict_proba(features_test)), 3))
    print('high importance features:')
    for idx in np.where(clf.feature_importances_ > 0.05)[0]:
        print(' ', feature_names[idx], round(clf.feature_importances_[idx], 3))
code
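Two calls in this cell have since been removed from their libraries: Series.as_matrix() (gone since pandas 1.0) and CountVectorizer.get_feature_names() (gone since scikit-learn 1.2). If re-running today, the equivalents would be:

labels = train['interest_level'].map(target_num_map).to_numpy()
feature_names = vec.get_feature_names_out()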
1005908/cell_24
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re

train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))

def re_search(key):
    """Present all features with specific re pattern"""
    result = []
    my_reg = '' + key
    for item in uniq_feature_total:
        if re.compile(my_reg, re.IGNORECASE).search(item) != None:
            result.append(item)
    return result

re_search('fee')
code
1005908/cell_22
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re
train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))

def re_search(key):
    """ Present all features with specific re pattern """
    result = []
    my_reg = '' + key
    for item in uniq_feature_total:
        if re.compile(my_reg, re.IGNORECASE).search(item) != None:
            result.append(item)
    return result
re_search('hardwood')
code
1005908/cell_27
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re
train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))

def re_search(key):
    """ Present all features with specific re pattern """
    result = []
    my_reg = '' + key
    for item in uniq_feature_total:
        if re.compile(my_reg, re.IGNORECASE).search(item) != None:
            result.append(item)
    return result
re_search('laundry')
code
1005908/cell_5
[ "text_plain_output_1.png" ]
import itertools as it
import pandas as pd
train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
code
73095165/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
Project
code
33120729/cell_21
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.apply(lambda x: len(x.unique()))
corr = df.corr().round(2)
corr
mean_deg_c1 = df.groupby('SSM').mean()['degree_C1']
mean_deg_c1
miss_bool = df['degree_C1'].isnull()
df.loc[miss_bool, 'degree_C1'] = df.loc[miss_bool, 'SSM'].apply(lambda x: mean_deg_c1[x])
df['degree_C1'].isnull().sum()
mean_deg_c3 = df.groupby('SSM').mean()['degree_C3']
mean_deg_c3
miss_bool = df['degree_C3'].isnull()
df.loc[miss_bool, 'degree_C3'] = df.loc[miss_bool, 'SSM'].apply(lambda x: mean_deg_c3[x])
df['degree_C3'].isnull().sum()
mean_moisture_9 = df.groupby('SSM').mean()['moisture_9']
mean_moisture_9
code
33120729/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.describe()
code
33120729/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df

def secSinceNoon(datTimStr):
    tt = pd.to_datetime(datTimStr).time()
    return (tt.hour * 3600 + tt.minute * 60 + tt.second) / 60.0
df['SSM'] = df['timestamp'].apply(secSinceNoon)
df['SSM']
code
33120729/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
print(df_training.head(3))
print(df_testing.head(3))
code
33120729/cell_20
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.apply(lambda x: len(x.unique()))
corr = df.corr().round(2)
corr
mean_deg_c1 = df.groupby('SSM').mean()['degree_C1']
mean_deg_c1
miss_bool = df['degree_C1'].isnull()
df.loc[miss_bool, 'degree_C1'] = df.loc[miss_bool, 'SSM'].apply(lambda x: mean_deg_c1[x])
df['degree_C1'].isnull().sum()
mean_deg_c3 = df.groupby('SSM').mean()['degree_C3']
mean_deg_c3
miss_bool = df['degree_C3'].isnull()
df.loc[miss_bool, 'degree_C3'] = df.loc[miss_bool, 'SSM'].apply(lambda x: mean_deg_c3[x])
df['degree_C3'].isnull().sum()
code
33120729/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
df_training.head()
code
33120729/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
code
33120729/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.apply(lambda x: len(x.unique()))
corr = df.corr().round(2)
corr
mean_deg_c1 = df.groupby('SSM').mean()['degree_C1']
mean_deg_c1
miss_bool = df['degree_C1'].isnull()
df.loc[miss_bool, 'degree_C1'] = df.loc[miss_bool, 'SSM'].apply(lambda x: mean_deg_c1[x])
df['degree_C1'].isnull().sum()
mean_deg_c3 = df.groupby('SSM').mean()['degree_C3']
mean_deg_c3
code
33120729/cell_18
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.apply(lambda x: len(x.unique()))
corr = df.corr().round(2)
corr
mean_deg_c1 = df.groupby('SSM').mean()['degree_C1']
mean_deg_c1
miss_bool = df['degree_C1'].isnull()
df.loc[miss_bool, 'degree_C1'] = df.loc[miss_bool, 'SSM'].apply(lambda x: mean_deg_c1[x])
df['degree_C1'].isnull().sum()
code
33120729/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df['timestamp']
code
33120729/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.apply(lambda x: len(x.unique()))
plt.figure(figsize=(10, 8))
sns.boxplot(x=df['luminousity'], y=df['WattHour'])
plt.show()
code
33120729/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.apply(lambda x: len(x.unique()))
corr = df.corr().round(2)
corr
code
33120729/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_testing.head()
code
33120729/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.apply(lambda x: len(x.unique()))
corr = df.corr().round(2)
corr
mean_deg_c1 = df.groupby('SSM').mean()['degree_C1']
mean_deg_c1
code
33120729/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.apply(lambda x: len(x.unique()))
code
33120729/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df['SSM'].value_counts()
code
33120729/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
plt.figure(figsize=(10, 8))
sns.heatmap(df.isnull(), cbar=False, yticklabels=False, cmap='viridis')
plt.show()
code
33120729/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
code
129001503/cell_4
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from icevision.all import *
code
129001503/cell_2
[ "text_plain_output_1.png" ]
# Download IceVision installation script:
!wget https://raw.githubusercontent.com/airctic/icevision/master/icevision_install.sh
# Choose installation target: cuda11 or cuda10 or cpu
!bash icevision_install.sh cuda10 master
code
129001503/cell_1
[ "text_plain_output_1.png" ]
!python --version
code
129001503/cell_3
[ "text_plain_output_1.png" ]
import IPython
import IPython
IPython.Application.instance().kernel.do_shutdown(True)
code
129001503/cell_5
[ "text_plain_output_1.png" ]
print("Let's begin!")
code
129039718/cell_4
[ "image_output_11.png", "text_plain_output_100.png", "text_plain_output_334.png", "image_output_239.png", "image_output_98.png", "text_plain_output_445.png", "image_output_337.png", "text_plain_output_201.png", "text_plain_output_261.png", "image_output_121.png", "image_output_180.png", "image_output_331.png", "text_plain_output_84.png", "text_plain_output_322.png", "text_plain_output_205.png", "image_output_379.png", "image_output_384.png", "image_output_303.png", "image_output_157.png", "text_plain_output_271.png", "text_plain_output_56.png", "text_plain_output_475.png", "text_plain_output_158.png", "text_plain_output_455.png", "image_output_74.png", "text_plain_output_223.png", "image_output_279.png", "text_plain_output_218.png", "text_plain_output_264.png", "image_output_181.png", "text_plain_output_282.png", "image_output_290.png", "text_plain_output_396.png", "image_output_156.png", "text_plain_output_287.png", "image_output_310.png", "text_plain_output_232.png", "image_output_204.png", "text_plain_output_181.png", "text_plain_output_137.png", "image_output_299.png", "text_plain_output_139.png", "text_plain_output_362.png", "image_output_330.png", "text_plain_output_35.png", "image_output_174.png", "image_output_244.png", "text_plain_output_258.png", "text_plain_output_452.png", "text_plain_output_130.png", "image_output_82.png", "text_plain_output_449.png", "text_plain_output_462.png", "text_plain_output_117.png", "image_output_305.png", "image_output_173.png", "text_plain_output_286.png", "image_output_24.png", "text_plain_output_367.png", "text_plain_output_262.png", "image_output_209.png", "text_plain_output_278.png", "text_plain_output_395.png", "image_output_380.png", "text_plain_output_254.png", "text_plain_output_307.png", "image_output_159.png", "image_output_139.png", "image_output_104.png", "image_output_220.png", "text_plain_output_98.png", "text_plain_output_399.png", "text_plain_output_236.png", "text_plain_output_195.png", "text_plain_output_471.png", "text_plain_output_219.png", "text_plain_output_420.png", "text_plain_output_237.png", "text_plain_output_43.png", "image_output_46.png", "text_plain_output_284.png", "image_output_207.png", "text_plain_output_187.png", "text_plain_output_309.png", "text_plain_output_78.png", "text_plain_output_143.png", "image_output_327.png", "text_plain_output_106.png", "text_plain_output_37.png", "text_plain_output_138.png", "image_output_295.png", "image_output_251.png", "image_output_232.png", "image_output_208.png", "image_output_106.png", "text_plain_output_192.png", "text_plain_output_426.png", "image_output_85.png", "image_output_349.png", "text_plain_output_184.png", "image_output_368.png", "text_plain_output_477.png", "image_output_373.png", "text_plain_output_274.png", "text_plain_output_172.png", "text_plain_output_332.png", "text_plain_output_147.png", "text_plain_output_443.png", "text_plain_output_327.png", "image_output_149.png", "image_output_108.png", "text_plain_output_256.png", "image_output_270.png", "text_plain_output_90.png", "image_output_150.png", "text_plain_output_79.png", "text_plain_output_331.png", "image_output_383.png", "image_output_25.png", "text_plain_output_5.png", "image_output_266.png", "image_output_190.png", "image_output_228.png", "image_output_183.png", "image_output_202.png", "text_plain_output_75.png", "image_output_275.png", "image_output_312.png", "text_plain_output_48.png", "text_plain_output_388.png", "text_plain_output_422.png", "text_plain_output_116.png", "image_output_77.png", 
"text_plain_output_128.png", "text_plain_output_30.png", "text_plain_output_167.png", "text_plain_output_213.png", "text_plain_output_73.png", "image_output_179.png", "image_output_319.png", "image_output_148.png", "text_plain_output_126.png", "image_output_47.png", "image_output_344.png", "image_output_141.png", "text_plain_output_321.png", "text_plain_output_272.png", "image_output_233.png", "image_output_229.png", "text_plain_output_115.png", "text_plain_output_474.png", "text_plain_output_407.png", "text_plain_output_482.png", "text_plain_output_316.png", "text_plain_output_355.png", "text_plain_output_15.png", "text_plain_output_390.png", "text_plain_output_133.png", "image_output_316.png", "image_output_387.png", "text_plain_output_437.png", "text_plain_output_198.png", "image_output_242.png", "text_plain_output_387.png", "image_output_171.png", "text_plain_output_178.png", "text_plain_output_226.png", "image_output_78.png", "image_output_219.png", "image_output_227.png", "image_output_170.png", "text_plain_output_154.png", "text_plain_output_234.png", "text_plain_output_375.png", "image_output_17.png", "text_plain_output_404.png", "text_plain_output_114.png", "text_plain_output_157.png", "text_plain_output_317.png", "image_output_30.png", "text_plain_output_251.png", "image_output_257.png", "text_plain_output_470.png", "text_plain_output_423.png", "text_plain_output_70.png", "text_plain_output_9.png", "text_plain_output_44.png", "image_output_73.png", "image_output_309.png", "text_plain_output_325.png", "image_output_221.png", "text_plain_output_203.png", "image_output_355.png", "text_plain_output_119.png", "text_plain_output_373.png", "text_plain_output_86.png", "image_output_72.png", "text_plain_output_244.png", "image_output_356.png", "text_plain_output_118.png", "image_output_336.png", "image_output_14.png", "image_output_59.png", "image_output_351.png", "image_output_39.png", "image_output_97.png", "text_plain_output_131.png", "image_output_378.png", "text_plain_output_40.png", "image_output_247.png", "text_plain_output_343.png", "image_output_357.png", "image_output_361.png", "text_plain_output_123.png", "text_plain_output_74.png", "image_output_28.png", "text_plain_output_190.png", "text_plain_output_302.png", "text_plain_output_31.png", "text_plain_output_340.png", "text_plain_output_379.png", "text_plain_output_281.png", "text_plain_output_20.png", "image_output_86.png", "image_output_137.png", "text_plain_output_273.png", "image_output_160.png", "image_output_234.png", "text_plain_output_263.png", "text_plain_output_102.png", "text_plain_output_229.png", "text_plain_output_111.png", "image_output_84.png", "image_output_125.png", "text_plain_output_414.png", "text_plain_output_461.png", "image_output_81.png", "image_output_300.png", "image_output_165.png", "text_plain_output_222.png", "image_output_194.png", "text_plain_output_101.png", "image_output_342.png", "image_output_273.png", "text_plain_output_169.png", "text_plain_output_144.png", "text_plain_output_161.png", "image_output_23.png", "text_plain_output_305.png", "text_plain_output_275.png", "text_plain_output_301.png", "image_output_136.png", "image_output_367.png", "text_plain_output_132.png", "text_plain_output_60.png", "image_output_34.png", "text_plain_output_467.png", "image_output_308.png", "text_plain_output_221.png", "image_output_64.png", "image_output_282.png", "image_output_119.png", "image_output_360.png", "text_plain_output_330.png", "text_plain_output_155.png", "text_plain_output_434.png", 
"text_plain_output_68.png", "text_plain_output_4.png", "text_plain_output_65.png", "text_plain_output_64.png", "image_output_237.png", "image_output_225.png", "text_plain_output_419.png", "image_output_131.png", "text_plain_output_215.png", "image_output_388.png", "image_output_134.png", "image_output_178.png", "image_output_177.png", "image_output_377.png", "text_plain_output_189.png", "text_plain_output_415.png", "text_plain_output_13.png", "image_output_188.png", "text_plain_output_200.png", "image_output_144.png", "text_plain_output_107.png", "image_output_335.png", "text_plain_output_398.png", "text_plain_output_312.png", "text_plain_output_248.png", "image_output_252.png", "text_plain_output_318.png", "text_plain_output_417.png", "image_output_13.png", "text_plain_output_52.png", "text_plain_output_393.png", "image_output_128.png", "text_plain_output_66.png", "text_plain_output_446.png", "text_plain_output_243.png", "image_output_184.png", "image_output_155.png", "text_plain_output_45.png", "text_plain_output_380.png", "image_output_40.png", "image_output_224.png", "image_output_5.png", "image_output_48.png", "text_plain_output_442.png", "image_output_114.png", "image_output_146.png", "text_plain_output_300.png", "image_output_68.png", "image_output_195.png", "text_plain_output_257.png", "text_plain_output_405.png", "text_plain_output_353.png", "image_output_142.png", "image_output_280.png", "text_plain_output_476.png", "text_plain_output_277.png", "image_output_109.png", "text_plain_output_457.png", "text_plain_output_361.png", "text_plain_output_171.png", "image_output_75.png", "text_plain_output_431.png", "text_plain_output_14.png", "image_output_18.png", "image_output_127.png", "image_output_143.png", "image_output_324.png", "text_plain_output_159.png", "text_plain_output_32.png", "image_output_314.png", "text_plain_output_304.png", "text_plain_output_88.png", "text_plain_output_240.png", "text_plain_output_29.png", "image_output_389.png", "image_output_313.png", "image_output_283.png", "image_output_58.png", "image_output_245.png", "image_output_118.png", "text_plain_output_359.png", "image_output_145.png", "text_plain_output_347.png", "image_output_254.png", "text_plain_output_140.png", "image_output_269.png", "image_output_385.png", "text_plain_output_376.png", "text_plain_output_280.png", "image_output_296.png", "image_output_110.png", "text_plain_output_129.png", "image_output_116.png", "text_plain_output_349.png", "text_plain_output_242.png", "image_output_286.png", "image_output_277.png", "text_plain_output_483.png", "text_plain_output_460.png", "text_plain_output_363.png", "text_plain_output_289.png", "image_output_169.png", "image_output_271.png", "text_plain_output_255.png", "image_output_374.png", "text_plain_output_160.png", "text_plain_output_58.png", "image_output_107.png", "image_output_92.png", "text_plain_output_329.png", "image_output_21.png", "text_plain_output_49.png", "text_plain_output_63.png", "image_output_372.png", "image_output_248.png", "image_output_120.png", "text_plain_output_260.png", "image_output_276.png", "text_plain_output_294.png", "image_output_256.png", "text_plain_output_27.png", "image_output_332.png", "text_plain_output_392.png", "text_plain_output_320.png", "text_plain_output_177.png", "image_output_105.png", "text_plain_output_386.png", "text_plain_output_438.png", "image_output_52.png", "text_plain_output_76.png", "image_output_288.png", "image_output_362.png", "image_output_307.png", "text_plain_output_333.png", 
"text_plain_output_108.png", "image_output_292.png", "text_plain_output_54.png", "text_plain_output_142.png", "text_plain_output_10.png", "image_output_60.png", "text_plain_output_269.png", "text_plain_output_276.png", "text_plain_output_6.png", "text_plain_output_326.png", "image_output_7.png", "text_plain_output_153.png", "text_plain_output_170.png", "image_output_343.png", "text_plain_output_92.png", "text_plain_output_57.png", "text_plain_output_120.png", "image_output_62.png", "text_plain_output_469.png", "image_output_96.png", "image_output_186.png", "text_plain_output_24.png", "image_output_182.png", "image_output_152.png", "image_output_322.png", "image_output_185.png", "text_plain_output_357.png", "image_output_235.png", "text_plain_output_21.png", "text_plain_output_344.png", "image_output_167.png", "text_plain_output_104.png", "image_output_56.png", "image_output_386.png", "image_output_196.png", "image_output_346.png", "image_output_31.png", "text_plain_output_270.png", "text_plain_output_47.png", "text_plain_output_466.png", "text_plain_output_121.png", "text_plain_output_25.png", "text_plain_output_134.png", "text_plain_output_401.png", "text_plain_output_77.png", "text_plain_output_421.png", "image_output_65.png", "text_plain_output_288.png", "image_output_115.png", "image_output_291.png", "image_output_20.png", "image_output_359.png", "text_plain_output_18.png", "text_plain_output_183.png", "text_plain_output_266.png", "image_output_69.png", "text_plain_output_149.png", "image_output_298.png", "text_plain_output_208.png", "text_plain_output_50.png", "text_plain_output_36.png", "image_output_369.png", "text_plain_output_383.png", "text_plain_output_207.png", "image_output_241.png", "image_output_32.png", "text_plain_output_391.png", "image_output_53.png", "image_output_230.png", "text_plain_output_413.png", "image_output_352.png", "text_plain_output_96.png", "text_plain_output_87.png", "text_plain_output_3.png", "text_plain_output_217.png", "text_plain_output_418.png", "image_output_4.png", "text_plain_output_427.png", "text_plain_output_180.png", "text_plain_output_141.png", "text_plain_output_210.png", "text_plain_output_112.png", "image_output_304.png", "text_plain_output_152.png", "text_plain_output_225.png", "text_plain_output_191.png", "text_plain_output_259.png", "text_plain_output_447.png", "text_plain_output_290.png", "image_output_51.png", "image_output_274.png", "image_output_338.png", "text_plain_output_283.png", "text_plain_output_247.png", "image_output_318.png", "image_output_162.png", "image_output_210.png", "text_plain_output_113.png", "text_plain_output_371.png", "image_output_103.png", "image_output_348.png", "text_plain_output_479.png", "image_output_226.png", "text_plain_output_324.png", "text_plain_output_22.png", "image_output_201.png", "image_output_253.png", "text_plain_output_188.png", "text_plain_output_366.png", "image_output_341.png", "image_output_117.png", "image_output_391.png", "text_plain_output_328.png", "text_plain_output_81.png", "text_plain_output_69.png", "text_plain_output_368.png", "image_output_217.png", "image_output_339.png", "text_plain_output_372.png", "image_output_83.png", "text_plain_output_175.png", "text_plain_output_165.png", "text_plain_output_146.png", "text_plain_output_145.png", "text_plain_output_125.png", "text_plain_output_454.png", "image_output_382.png", "image_output_317.png", "image_output_213.png", "image_output_172.png", "image_output_42.png", "image_output_306.png", "image_output_381.png", 
"text_plain_output_338.png", "image_output_240.png", "image_output_35.png", "image_output_263.png", "text_plain_output_197.png", "image_output_311.png", "image_output_90.png", "text_plain_output_382.png", "text_plain_output_315.png", "text_plain_output_429.png", "image_output_302.png", "text_plain_output_38.png", "image_output_41.png", "image_output_57.png", "text_plain_output_433.png", "text_plain_output_7.png", "image_output_260.png", "image_output_222.png", "text_plain_output_214.png", "text_plain_output_166.png", "text_plain_output_358.png", "image_output_329.png", "text_plain_output_314.png", "text_plain_output_410.png", "image_output_36.png", "text_plain_output_432.png", "text_plain_output_411.png", "text_plain_output_91.png", "image_output_265.png", "image_output_8.png", "image_output_37.png", "text_plain_output_308.png", "image_output_66.png", "text_plain_output_245.png", "text_plain_output_16.png", "text_plain_output_174.png", "image_output_16.png", "text_plain_output_212.png", "text_plain_output_230.png", "text_plain_output_265.png", "text_plain_output_430.png", "image_output_192.png", "image_output_211.png", "image_output_163.png", "image_output_91.png", "text_plain_output_435.png", "text_plain_output_378.png", "text_plain_output_59.png", "text_plain_output_409.png", "text_plain_output_206.png", "image_output_70.png", "text_plain_output_103.png", "text_plain_output_71.png", "image_output_138.png", "text_plain_output_8.png", "text_plain_output_122.png", "text_plain_output_384.png", "text_plain_output_211.png", "image_output_158.png", "text_plain_output_182.png", "text_plain_output_26.png", "image_output_285.png", "text_plain_output_406.png", "text_plain_output_310.png", "image_output_67.png", "text_plain_output_456.png", "image_output_27.png", "image_output_353.png", "image_output_354.png", "image_output_287.png", "text_plain_output_220.png", "image_output_261.png", "image_output_333.png", "image_output_122.png", "text_plain_output_451.png", "image_output_54.png", "text_plain_output_109.png", "image_output_297.png", "text_plain_output_459.png", "text_plain_output_238.png", "image_output_323.png", "image_output_189.png", "image_output_363.png", "image_output_6.png", "text_plain_output_41.png", "text_plain_output_34.png", "image_output_301.png", "image_output_45.png", "text_plain_output_253.png", "text_plain_output_346.png", "text_plain_output_291.png", "image_output_246.png", "text_plain_output_168.png", "text_plain_output_394.png", "text_plain_output_204.png", "image_output_365.png", "text_plain_output_241.png", "text_plain_output_231.png", "text_plain_output_345.png", "text_plain_output_350.png", "text_plain_output_209.png", "text_plain_output_185.png", "text_plain_output_85.png", "image_output_250.png", "text_plain_output_42.png", "image_output_63.png", "image_output_71.png", "text_plain_output_110.png", "text_plain_output_67.png", "image_output_153.png", "image_output_126.png", "text_plain_output_468.png", "text_plain_output_370.png", "image_output_281.png", "text_plain_output_297.png", "text_plain_output_53.png", "text_plain_output_313.png", "text_plain_output_224.png", "image_output_80.png", "image_output_289.png", "image_output_112.png", "image_output_164.png", "image_output_293.png", "image_output_326.png", "image_output_347.png", "image_output_95.png", "image_output_123.png", "text_plain_output_193.png", "image_output_147.png", "text_plain_output_441.png", "image_output_198.png", "text_plain_output_403.png", "image_output_370.png", "image_output_212.png", 
"text_plain_output_23.png", "image_output_278.png", "image_output_364.png", "image_output_340.png", "text_plain_output_173.png", "text_plain_output_235.png", "image_output_243.png", "text_plain_output_151.png", "text_plain_output_89.png", "image_output_93.png", "text_plain_output_299.png", "image_output_205.png", "image_output_206.png", "text_plain_output_51.png", "text_plain_output_450.png", "text_plain_output_252.png", "image_output_214.png", "image_output_12.png", "text_plain_output_296.png", "text_plain_output_28.png", "text_plain_output_72.png", "text_plain_output_99.png", "image_output_284.png", "image_output_161.png", "image_output_231.png", "text_plain_output_381.png", "text_plain_output_163.png", "text_plain_output_179.png", "image_output_22.png", "image_output_132.png", "text_plain_output_162.png", "text_plain_output_136.png", "image_output_328.png", "text_plain_output_246.png", "text_plain_output_2.png", "image_output_320.png", "text_plain_output_239.png", "text_plain_output_127.png", "image_output_89.png", "text_plain_output_311.png", "image_output_315.png", "image_output_268.png", "image_output_55.png", "text_plain_output_295.png", "text_plain_output_279.png", "text_plain_output_337.png", "text_plain_output_196.png", "image_output_133.png", "text_plain_output_342.png", "text_plain_output_97.png", "image_output_216.png", "image_output_218.png", "image_output_191.png", "image_output_151.png", "text_plain_output_227.png", "image_output_200.png", "text_plain_output_453.png", "text_plain_output_1.png", "text_plain_output_33.png", "text_plain_output_150.png", "image_output_294.png", "image_output_94.png", "text_plain_output_39.png", "image_output_3.png", "image_output_111.png", "text_plain_output_176.png", "image_output_101.png", "image_output_366.png", "image_output_249.png", "text_plain_output_335.png", "image_output_135.png", "text_plain_output_186.png", "image_output_29.png", "text_plain_output_233.png", "text_plain_output_228.png", "image_output_238.png", "text_plain_output_473.png", "image_output_325.png", "text_plain_output_385.png", "text_plain_output_478.png", "text_plain_output_55.png", "text_plain_output_412.png", "text_plain_output_293.png", "text_plain_output_268.png", "text_plain_output_436.png", "image_output_193.png", "text_plain_output_199.png", "text_plain_output_354.png", "text_plain_output_463.png", "text_plain_output_360.png", "text_plain_output_319.png", "text_plain_output_82.png", "image_output_187.png", "text_plain_output_356.png", "image_output_44.png", "text_plain_output_202.png", "image_output_199.png", "image_output_130.png", "text_plain_output_93.png", "text_plain_output_336.png", "image_output_43.png", "text_plain_output_19.png", "text_plain_output_439.png", "text_plain_output_341.png", "image_output_2.png", "image_output_375.png", "image_output_262.png", "image_output_1.png", "text_plain_output_105.png", "text_plain_output_465.png", "image_output_350.png", "text_plain_output_80.png", "image_output_10.png", "image_output_259.png", "text_plain_output_94.png", "text_plain_output_164.png", "text_plain_output_249.png", "text_plain_output_444.png", "image_output_168.png", "image_output_258.png", "text_plain_output_216.png", "text_plain_output_124.png", "image_output_236.png", "text_plain_output_17.png", "text_plain_output_148.png", "image_output_154.png", "text_plain_output_323.png", "image_output_102.png", "text_plain_output_402.png", "text_plain_output_424.png", "image_output_176.png", "text_plain_output_250.png", "image_output_321.png", 
"image_output_175.png", "text_plain_output_11.png", "text_plain_output_481.png", "image_output_124.png", "text_plain_output_400.png", "image_output_88.png", "text_plain_output_12.png", "image_output_272.png", "text_plain_output_267.png", "text_plain_output_408.png", "image_output_33.png", "text_plain_output_425.png", "text_plain_output_428.png", "image_output_140.png", "text_plain_output_416.png", "text_plain_output_194.png", "image_output_345.png", "image_output_358.png", "text_plain_output_62.png", "image_output_87.png", "text_plain_output_480.png", "text_plain_output_303.png", "image_output_255.png", "text_plain_output_377.png", "image_output_50.png", "text_plain_output_440.png", "text_plain_output_95.png", "text_plain_output_339.png", "text_plain_output_458.png", "image_output_15.png", "image_output_267.png", "image_output_99.png", "image_output_197.png", "text_plain_output_464.png", "image_output_100.png", "text_plain_output_156.png", "image_output_129.png", "text_plain_output_298.png", "text_plain_output_369.png", "text_plain_output_348.png", "text_plain_output_448.png", "text_plain_output_364.png", "image_output_166.png", "text_plain_output_365.png", "text_plain_output_61.png", "image_output_76.png", "text_plain_output_352.png", "image_output_223.png", "image_output_9.png", "text_plain_output_83.png", "image_output_19.png", "text_plain_output_374.png", "image_output_371.png", "image_output_79.png", "text_plain_output_472.png", "text_plain_output_397.png", "image_output_215.png", "image_output_61.png", "text_plain_output_389.png", "image_output_203.png", "image_output_390.png", "text_plain_output_292.png", "text_plain_output_351.png", "image_output_38.png", "image_output_334.png", "text_plain_output_135.png", "text_plain_output_285.png", "image_output_113.png", "image_output_26.png", "text_plain_output_306.png", "image_output_376.png", "image_output_264.png", "text_plain_output_46.png" ]
import cv2
import glob
import matplotlib.pyplot as plt  # assumed import: plt is used below but was not defined in this cell (likely imported earlier in the notebook)
mask_directory = '/kaggle/input/mask-images/testMasks'
mask_names = glob.glob('/kaggle/input/mask-images/testMasks/*.tif')
mask_names = sorted(mask_names, key=lambda x: (len(x), x))
masks = [cv2.imread(mask, 0) for mask in mask_names]
for i in range(len(masks)):
    print(i)
    plt.imshow(masks[i], cmap='gray')
    plt.pause(0.1)
code
129039718/cell_3
[ "text_plain_output_1.png" ]
import cv2
import glob
mask_directory = '/kaggle/input/mask-images/testMasks'
mask_names = glob.glob('/kaggle/input/mask-images/testMasks/*.tif')
mask_names = sorted(mask_names, key=lambda x: (len(x), x))
print(mask_names[0:6])
masks = [cv2.imread(mask, 0) for mask in mask_names]
print(len(masks))
code
90108947/cell_21
[ "text_plain_output_1.png" ]
from keras.layers import Dense
from keras.layers import LSTM
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
import math
import matplotlib.pylab as plt
import numpy as np # linear algebra
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
df.shape
data_plot = pd.read_csv(data, sep=',', parse_dates=['Date'], index_col='Date')
new_df = df.filter(['Close'])
dataset = new_df.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
scaled_data
train_data = scaled_data[0:training_data_len, :]
X_train = []
Y_train = []
for i in range(60, len(train_data)):
    X_train.append(train_data[i - 60:i, 0])
    Y_train.append(train_data[i, 0])
X_train, Y_train = (np.array(X_train), np.array(Y_train))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, Y_train, batch_size=1, epochs=1)
test_data = scaled_data[training_data_len - 60:, :]
X_test = []
Y_test = dataset[training_data_len:, :]
for i in range(60, len(test_data)):
    X_test.append(test_data[i - 60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predictions = model.predict(X_test)
predictions = scaler.inverse_transform(predictions)
train = df[:training_data_len]
valid = df[training_data_len:]
valid['Predictions'] = predictions
plt.figure(figsize=(16, 8))
plt.title('Model')
plt.xlabel('Date', fontsize=18)
plt.ylabel('Closing Price', fontsize=18)
plt.plot(train['Close'])
plt.plot(valid[['Close', 'Predictions']])
plt.legend(['Train', 'Val', 'Predictions'], loc='lower right')
plt.show()
code
90108947/cell_9
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
import math
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
df.shape
new_df = df.filter(['Close'])
dataset = new_df.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
scaled_data
train_data = scaled_data[0:training_data_len, :]
X_train = []
Y_train = []
for i in range(60, len(train_data)):
    X_train.append(train_data[i - 60:i, 0])
    Y_train.append(train_data[i, 0])
    if i <= 61:
        print(X_train)
        print(Y_train)
        print()
code