Dataset columns:
path: string (lengths 13 to 17)
screenshot_names: list (lengths 1 to 873)
code: string (lengths 0 to 40.4k)
cell_type: string (1 distinct value)
121152202/cell_9
[ "text_html_output_1.png" ]
import pandas as pd import tensorflow as tf df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv') df.drop(columns='id', inplace=True) df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv') df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True) df = pd.concat([df, df_add]) df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv') df_test.drop(columns='id', inplace=True) y = df.pop('Strength') df['tot_comp'] = df.iloc[:, :7].sum(axis=1) df['ageinmonth'] = df.AgeInDays // 30 / 12 df['AgeInDays'] = df.AgeInDays / 365 df_test['tot_comp'] = df_test.iloc[:, :7].sum(axis=1) df_test['ageinmonth'] = df_test.AgeInDays // 30 / 12 df_test['AgeInDays'] = df_test.AgeInDays / 365 df_transform = df.iloc[:, :7].transform(lambda x: x / df.tot_comp) df_transform = pd.concat([df_transform, df.AgeInDays, df.ageinmonth], axis=1) df_test_transform = df_test.iloc[:, :7].transform(lambda x: x / df_test.tot_comp) df_test_transform = pd.concat([df_test_transform, df_test.AgeInDays, df_test.ageinmonth], axis=1) model_nn = tf.keras.Sequential(name='neural_network') model_nn.add(tf.keras.layers.Dense(9, activation='relu', input_shape=(df_transform.shape[1],))) model_nn.add(tf.keras.layers.Dense(16, activation='relu')) model_nn.add(tf.keras.layers.Dense(1, activation='relu')) model_nn.summary()
code
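Note on the cell above (121152202/cell_9): the output layer uses a ReLU activation even though Strength is a continuous regression target, which clamps predictions at zero and can stall training if that layer's pre-activations go negative. A minimal sketch of the same 9-16-1 stack with a linear output head, a common alternative for regression; layer sizes and optimizer settings are copied from the cells above, the function name is illustrative:

    import tensorflow as tf

    def build_strength_regressor(n_features: int) -> tf.keras.Model:
        # Same hidden sizes as the notebook cell; the only change is the
        # linear output, which lets the model predict any real value.
        model = tf.keras.Sequential([
            tf.keras.layers.Input(shape=(n_features,)),
            tf.keras.layers.Dense(9, activation='relu'),
            tf.keras.layers.Dense(16, activation='relu'),
            tf.keras.layers.Dense(1),  # linear activation
        ], name='neural_network')
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0003),
                      loss='mse',
                      metrics=[tf.keras.metrics.RootMeanSquaredError()])
        return model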
121152202/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv')
df.drop(columns='id', inplace=True)
df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv')
df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True)
df = pd.concat([df, df_add])
df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv')
df_test.drop(columns='id', inplace=True)
df_test.head()
code
121152202/cell_2
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv')
df.drop(columns='id', inplace=True)
print(f'the competition dataset shape is {df.shape}')
code
121152202/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import tensorflow as tf df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv') df.drop(columns='id', inplace=True) df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv') df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True) df = pd.concat([df, df_add]) df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv') df_test.drop(columns='id', inplace=True) y = df.pop('Strength') df['tot_comp'] = df.iloc[:, :7].sum(axis=1) df['ageinmonth'] = df.AgeInDays // 30 / 12 df['AgeInDays'] = df.AgeInDays / 365 df_test['tot_comp'] = df_test.iloc[:, :7].sum(axis=1) df_test['ageinmonth'] = df_test.AgeInDays // 30 / 12 df_test['AgeInDays'] = df_test.AgeInDays / 365 df_transform = df.iloc[:, :7].transform(lambda x: x / df.tot_comp) df_transform = pd.concat([df_transform, df.AgeInDays, df.ageinmonth], axis=1) df_test_transform = df_test.iloc[:, :7].transform(lambda x: x / df_test.tot_comp) df_test_transform = pd.concat([df_test_transform, df_test.AgeInDays, df_test.ageinmonth], axis=1) model_nn = tf.keras.Sequential(name='neural_network') model_nn.add(tf.keras.layers.Dense(9, activation='relu', input_shape=(df_transform.shape[1],))) model_nn.add(tf.keras.layers.Dense(16, activation='relu')) model_nn.add(tf.keras.layers.Dense(1, activation='relu')) model_nn.summary() tf.random.set_seed(9) model_nn.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0003), loss='mse', metrics=tf.keras.metrics.RootMeanSquaredError()) history = model_nn.fit(df_transform, y, batch_size=64, epochs=250, validation_split=0.3, verbose=2) plt.plot(history.history['root_mean_squared_error'][10:], label='train RMSE') plt.plot(history.history['val_root_mean_squared_error'][10:], label='valid RMSE') plt.legend()
code
121152202/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, KFold
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import lightgbm as lgbm
import tensorflow as tf

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
121152202/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv') df.drop(columns='id', inplace=True) df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv') df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True) df = pd.concat([df, df_add]) df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv') df_test.drop(columns='id', inplace=True) y = df.pop('Strength') df['tot_comp'] = df.iloc[:, :7].sum(axis=1) df['ageinmonth'] = df.AgeInDays // 30 / 12 df['AgeInDays'] = df.AgeInDays / 365 df_test['tot_comp'] = df_test.iloc[:, :7].sum(axis=1) df_test['ageinmonth'] = df_test.AgeInDays // 30 / 12 df_test['AgeInDays'] = df_test.AgeInDays / 365 df_transform = df.iloc[:, :7].transform(lambda x: x / df.tot_comp) df_transform = pd.concat([df_transform, df.AgeInDays, df.ageinmonth], axis=1) df_transform.head() df_test_transform = df_test.iloc[:, :7].transform(lambda x: x / df_test.tot_comp) df_test_transform = pd.concat([df_test_transform, df_test.AgeInDays, df_test.ageinmonth], axis=1) df_test_transform.head()
code
121152202/cell_8
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv') df.drop(columns='id', inplace=True) df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv') df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True) df = pd.concat([df, df_add]) df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv') df_test.drop(columns='id', inplace=True) y = df.pop('Strength') df['tot_comp'] = df.iloc[:, :7].sum(axis=1) df['ageinmonth'] = df.AgeInDays // 30 / 12 df['AgeInDays'] = df.AgeInDays / 365 df_test['tot_comp'] = df_test.iloc[:, :7].sum(axis=1) df_test['ageinmonth'] = df_test.AgeInDays // 30 / 12 df_test['AgeInDays'] = df_test.AgeInDays / 365 df_transform = df.iloc[:, :7].transform(lambda x: x / df.tot_comp) df_transform = pd.concat([df_transform, df.AgeInDays, df.ageinmonth], axis=1) df_test_transform = df_test.iloc[:, :7].transform(lambda x: x / df_test.tot_comp) df_test_transform = pd.concat([df_test_transform, df_test.AgeInDays, df_test.ageinmonth], axis=1) df_transform.head()
code
121152202/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv')
df.drop(columns='id', inplace=True)
df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv')
df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True)
print(f'the addition dataset shape is {df_add.shape}')
df = pd.concat([df, df_add])
print(f'the new dataset shape is {df.shape}')
code
121152202/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import tensorflow as tf df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv') df.drop(columns='id', inplace=True) df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv') df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True) df = pd.concat([df, df_add]) df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv') df_test.drop(columns='id', inplace=True) y = df.pop('Strength') df['tot_comp'] = df.iloc[:, :7].sum(axis=1) df['ageinmonth'] = df.AgeInDays // 30 / 12 df['AgeInDays'] = df.AgeInDays / 365 df_test['tot_comp'] = df_test.iloc[:, :7].sum(axis=1) df_test['ageinmonth'] = df_test.AgeInDays // 30 / 12 df_test['AgeInDays'] = df_test.AgeInDays / 365 df_transform = df.iloc[:, :7].transform(lambda x: x / df.tot_comp) df_transform = pd.concat([df_transform, df.AgeInDays, df.ageinmonth], axis=1) df_test_transform = df_test.iloc[:, :7].transform(lambda x: x / df_test.tot_comp) df_test_transform = pd.concat([df_test_transform, df_test.AgeInDays, df_test.ageinmonth], axis=1) model_nn = tf.keras.Sequential(name='neural_network') model_nn.add(tf.keras.layers.Dense(9, activation='relu', input_shape=(df_transform.shape[1],))) model_nn.add(tf.keras.layers.Dense(16, activation='relu')) model_nn.add(tf.keras.layers.Dense(1, activation='relu')) model_nn.summary() tf.random.set_seed(9) model_nn.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0003), loss='mse', metrics=tf.keras.metrics.RootMeanSquaredError()) history = model_nn.fit(df_transform, y, batch_size=64, epochs=250, validation_split=0.3, verbose=2) predictions = model_nn.predict(df_test_transform) subs = pd.read_csv('/kaggle/input/playground-series-s3e9/sample_submission.csv') subs['Strength'] = predictions subs.head()
code
121152202/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd import tensorflow as tf df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv') df.drop(columns='id', inplace=True) df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv') df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True) df = pd.concat([df, df_add]) df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv') df_test.drop(columns='id', inplace=True) y = df.pop('Strength') df['tot_comp'] = df.iloc[:, :7].sum(axis=1) df['ageinmonth'] = df.AgeInDays // 30 / 12 df['AgeInDays'] = df.AgeInDays / 365 df_test['tot_comp'] = df_test.iloc[:, :7].sum(axis=1) df_test['ageinmonth'] = df_test.AgeInDays // 30 / 12 df_test['AgeInDays'] = df_test.AgeInDays / 365 df_transform = df.iloc[:, :7].transform(lambda x: x / df.tot_comp) df_transform = pd.concat([df_transform, df.AgeInDays, df.ageinmonth], axis=1) df_test_transform = df_test.iloc[:, :7].transform(lambda x: x / df_test.tot_comp) df_test_transform = pd.concat([df_test_transform, df_test.AgeInDays, df_test.ageinmonth], axis=1) model_nn = tf.keras.Sequential(name='neural_network') model_nn.add(tf.keras.layers.Dense(9, activation='relu', input_shape=(df_transform.shape[1],))) model_nn.add(tf.keras.layers.Dense(16, activation='relu')) model_nn.add(tf.keras.layers.Dense(1, activation='relu')) model_nn.summary() tf.random.set_seed(9) model_nn.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0003), loss='mse', metrics=tf.keras.metrics.RootMeanSquaredError()) history = model_nn.fit(df_transform, y, batch_size=64, epochs=250, validation_split=0.3, verbose=2)
code
121152202/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import tensorflow as tf df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv') df.drop(columns='id', inplace=True) df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv') df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True) df = pd.concat([df, df_add]) df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv') df_test.drop(columns='id', inplace=True) y = df.pop('Strength') df['tot_comp'] = df.iloc[:, :7].sum(axis=1) df['ageinmonth'] = df.AgeInDays // 30 / 12 df['AgeInDays'] = df.AgeInDays / 365 df_test['tot_comp'] = df_test.iloc[:, :7].sum(axis=1) df_test['ageinmonth'] = df_test.AgeInDays // 30 / 12 df_test['AgeInDays'] = df_test.AgeInDays / 365 df_transform = df.iloc[:, :7].transform(lambda x: x / df.tot_comp) df_transform = pd.concat([df_transform, df.AgeInDays, df.ageinmonth], axis=1) df_test_transform = df_test.iloc[:, :7].transform(lambda x: x / df_test.tot_comp) df_test_transform = pd.concat([df_test_transform, df_test.AgeInDays, df_test.ageinmonth], axis=1) model_nn = tf.keras.Sequential(name='neural_network') model_nn.add(tf.keras.layers.Dense(9, activation='relu', input_shape=(df_transform.shape[1],))) model_nn.add(tf.keras.layers.Dense(16, activation='relu')) model_nn.add(tf.keras.layers.Dense(1, activation='relu')) model_nn.summary() tf.random.set_seed(9) model_nn.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0003), loss='mse', metrics=tf.keras.metrics.RootMeanSquaredError()) history = model_nn.fit(df_transform, y, batch_size=64, epochs=250, validation_split=0.3, verbose=2) predictions = model_nn.predict(df_test_transform) sns.kdeplot(y, color='chocolate', label='true value') sns.kdeplot(predictions, label='prediction') plt.legend()
code
121152202/cell_5
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv')
df.drop(columns='id', inplace=True)
df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv')
df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True)
df = pd.concat([df, df_add])
df.head()
code
128033768/cell_13
[ "text_html_output_1.png" ]
from keras.layers import LSTM, Dense from keras.models import Sequential from sklearn.preprocessing import MinMaxScaler from statsmodels.tsa.seasonal import seasonal_decompose import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np import pandas as pd import pandas as pd class Logger: RESET = '\x1b[0m' RED = '\x1b[31m' GREEN = '\x1b[32m' def info(self, message: str): pass def error(self, message: str): pass logger = Logger() def load_data(file: str) -> pd.DataFrame: data = pd.read_csv(file) return data def show_type_1(data: pd.DataFrame) -> None: colors = ['#FF7F50', '#DDA0DD', '#66CDAA', '#BC8F8F'] def prepare_data(dataFrame: pd.DataFrame, target: str): n_cols = 1 dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(data)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) return (x_train, y_train, scaler, train_size, test_size, scaled_data) def generate_forecast(scaled_data, train_size) -> (np.array, np.array): time_steps = 60 test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test) def generate_forecast_v2(dataFrame: pd.DataFrame, target: str) -> (np.array, np.array): dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(dataset)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test, train_size) train_data = load_data('/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTrain.csv') test_data = load_data('/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTest.csv') train_data = train_data[(train_data['meanpressure'] > 980) & (train_data['meanpressure'] < 1050)] data_to_show = train_data.drop('date', axis=1) ts_decomposition = seasonal_decompose(x=train_data['meantemp'], model='additive', period=365) trend_estimate = ts_decomposition.trend seasonal_estimate = ts_decomposition.seasonal residual_estimate = ts_decomposition.resid model = 
Sequential([LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1)), LSTM(units=50, return_sequences=False), Dense(units=25), Dense(units=1)]) model.compile(optimizer='adam', loss='mean_squared_error') history = model.fit(x_train, y_train, epochs=30, batch_size=32) x_test, y_test = generate_forecast(scaled_data, train_size) y_test = scaler.inverse_transform(y_test) predictions = model.predict(x_test) predictions = scaler.inverse_transform(predictions) preds_acts = pd.DataFrame(data={'Predictions': predictions.flatten(), 'Actuals': y_test.flatten()}) plt.figure(figsize=(16, 6)) plt.plot(preds_acts['Predictions']) plt.plot(preds_acts['Actuals']) plt.legend(['Predictions', 'Actuals']) plt.show()
code
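The `prepare_data`, `generate_forecast`, and `generate_forecast_v2` helpers repeated in the cells of this notebook all build 60-step sliding windows by hand from the scaled series. A minimal, self-contained sketch of that windowing and the array shapes it produces (function and variable names here are illustrative, not from the notebook):

    import numpy as np

    def make_windows(series: np.ndarray, time_steps: int = 60):
        # series: shape (n, 1), e.g. the MinMax-scaled meantemp column.
        # Returns x with shape (n - time_steps, time_steps, 1) and
        # y with shape (n - time_steps, 1), ready for an LSTM input.
        x = np.stack([series[i - time_steps:i] for i in range(time_steps, len(series))])
        y = series[time_steps:]
        return x, y

    demo = np.linspace(0.0, 1.0, 200).reshape(-1, 1)  # stand-in for a scaled series
    x, y = make_windows(demo)
    print(x.shape, y.shape)  # (140, 60, 1) (140, 1)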
128033768/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler from statsmodels.tsa.seasonal import seasonal_decompose import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np import pandas as pd import pandas as pd class Logger: RESET = '\x1b[0m' RED = '\x1b[31m' GREEN = '\x1b[32m' def info(self, message: str): pass def error(self, message: str): pass logger = Logger() def load_data(file: str) -> pd.DataFrame: data = pd.read_csv(file) return data def show_type_1(data: pd.DataFrame) -> None: colors = ['#FF7F50', '#DDA0DD', '#66CDAA', '#BC8F8F'] def prepare_data(dataFrame: pd.DataFrame, target: str): n_cols = 1 dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(data)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) return (x_train, y_train, scaler, train_size, test_size, scaled_data) def generate_forecast(scaled_data, train_size) -> (np.array, np.array): time_steps = 60 test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test) def generate_forecast_v2(dataFrame: pd.DataFrame, target: str) -> (np.array, np.array): dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(dataset)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test, train_size) train_data = load_data('/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTrain.csv') test_data = load_data('/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTest.csv') train_data = train_data[(train_data['meanpressure'] > 980) & (train_data['meanpressure'] < 1050)] data_to_show = train_data.drop('date', axis=1) ts_decomposition = seasonal_decompose(x=train_data['meantemp'], model='additive', period=365) trend_estimate = ts_decomposition.trend seasonal_estimate = ts_decomposition.seasonal residual_estimate = ts_decomposition.resid plt.figure(figsize=(15, 5)) plt.plot(train_data['meantemp'], label='Original') plt.legend() 
plt.figure(figsize=(15, 5)) plt.plot(trend_estimate, label='Trend') plt.legend() plt.figure(figsize=(15, 5)) plt.plot(seasonal_estimate, label='Seasonal') plt.legend() plt.figure(figsize=(15, 5)) plt.plot(residual_estimate, label='Residual') plt.legend()
code
128033768/cell_4
[ "image_output_4.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np import pandas as pd import pandas as pd class Logger: RESET = '\x1b[0m' RED = '\x1b[31m' GREEN = '\x1b[32m' def info(self, message: str): pass def error(self, message: str): pass logger = Logger() def load_data(file: str) -> pd.DataFrame: data = pd.read_csv(file) return data def show_type_1(data: pd.DataFrame) -> None: colors = ['#FF7F50', '#DDA0DD', '#66CDAA', '#BC8F8F'] def prepare_data(dataFrame: pd.DataFrame, target: str): n_cols = 1 dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(data)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) return (x_train, y_train, scaler, train_size, test_size, scaled_data) def generate_forecast(scaled_data, train_size) -> (np.array, np.array): time_steps = 60 test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test) def generate_forecast_v2(dataFrame: pd.DataFrame, target: str) -> (np.array, np.array): dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(dataset)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test, train_size) train_data = load_data('/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTrain.csv') test_data = load_data('/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTest.csv') train_data.info() test_data.info()
code
128033768/cell_6
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np import pandas as pd import pandas as pd class Logger: RESET = '\x1b[0m' RED = '\x1b[31m' GREEN = '\x1b[32m' def info(self, message: str): pass def error(self, message: str): pass logger = Logger() def load_data(file: str) -> pd.DataFrame: data = pd.read_csv(file) return data def show_type_1(data: pd.DataFrame) -> None: colors = ['#FF7F50', '#DDA0DD', '#66CDAA', '#BC8F8F'] def prepare_data(dataFrame: pd.DataFrame, target: str): n_cols = 1 dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(data)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) return (x_train, y_train, scaler, train_size, test_size, scaled_data) def generate_forecast(scaled_data, train_size) -> (np.array, np.array): time_steps = 60 test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test) def generate_forecast_v2(dataFrame: pd.DataFrame, target: str) -> (np.array, np.array): dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(dataset)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test, train_size) train_data = load_data('/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTrain.csv') test_data = load_data('/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTest.csv') test_data.head()
code
128033768/cell_11
[ "text_html_output_1.png" ]
from keras.layers import LSTM, Dense from keras.models import Sequential from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np import pandas as pd import pandas as pd class Logger: RESET = '\x1b[0m' RED = '\x1b[31m' GREEN = '\x1b[32m' def info(self, message: str): pass def error(self, message: str): pass logger = Logger() def load_data(file: str) -> pd.DataFrame: data = pd.read_csv(file) return data def show_type_1(data: pd.DataFrame) -> None: colors = ['#FF7F50', '#DDA0DD', '#66CDAA', '#BC8F8F'] def prepare_data(dataFrame: pd.DataFrame, target: str): n_cols = 1 dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(data)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) return (x_train, y_train, scaler, train_size, test_size, scaled_data) def generate_forecast(scaled_data, train_size) -> (np.array, np.array): time_steps = 60 test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test) def generate_forecast_v2(dataFrame: pd.DataFrame, target: str) -> (np.array, np.array): dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(dataset)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test, train_size) model = Sequential([LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1)), LSTM(units=50, return_sequences=False), Dense(units=25), Dense(units=1)]) model.compile(optimizer='adam', loss='mean_squared_error') history = model.fit(x_train, y_train, epochs=30, batch_size=32)
code
128033768/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import LSTM, Dense
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from sklearn.metrics import mean_squared_error, mean_absolute_error, mean_absolute_percentage_error
import itertools
code
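The stderr screenshot attached to the cell above most likely comes from `statsmodels.tsa.arima_model`, which newer statsmodels releases deprecate and then remove in favor of `statsmodels.tsa.arima.model.ARIMA`. If that is indeed the cause, the replacement import is a one-line change:

    # Assumed fix: newer statsmodels versions drop the old arima_model module,
    # so the ARIMA class is imported from its replacement location instead.
    from statsmodels.tsa.arima.model import ARIMA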
128033768/cell_7
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np import pandas as pd import pandas as pd class Logger: RESET = '\x1b[0m' RED = '\x1b[31m' GREEN = '\x1b[32m' def info(self, message: str): pass def error(self, message: str): pass logger = Logger() def load_data(file: str) -> pd.DataFrame: data = pd.read_csv(file) return data def show_type_1(data: pd.DataFrame) -> None: colors = ['#FF7F50', '#DDA0DD', '#66CDAA', '#BC8F8F'] def prepare_data(dataFrame: pd.DataFrame, target: str): n_cols = 1 dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(data)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) return (x_train, y_train, scaler, train_size, test_size, scaled_data) def generate_forecast(scaled_data, train_size) -> (np.array, np.array): time_steps = 60 test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test) def generate_forecast_v2(dataFrame: pd.DataFrame, target: str) -> (np.array, np.array): dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(dataset)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test, train_size) train_data = load_data('/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTrain.csv') test_data = load_data('/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTest.csv') train_data.describe()
code
128033768/cell_8
[ "image_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np import pandas as pd import pandas as pd class Logger: RESET = '\x1b[0m' RED = '\x1b[31m' GREEN = '\x1b[32m' def info(self, message: str): pass def error(self, message: str): pass logger = Logger() def load_data(file: str) -> pd.DataFrame: data = pd.read_csv(file) return data def show_type_1(data: pd.DataFrame) -> None: colors = ['#FF7F50', '#DDA0DD', '#66CDAA', '#BC8F8F'] def prepare_data(dataFrame: pd.DataFrame, target: str): n_cols = 1 dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(data)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) return (x_train, y_train, scaler, train_size, test_size, scaled_data) def generate_forecast(scaled_data, train_size) -> (np.array, np.array): time_steps = 60 test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test) def generate_forecast_v2(dataFrame: pd.DataFrame, target: str) -> (np.array, np.array): dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(dataset)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test, train_size) train_data = load_data('/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTrain.csv') test_data = load_data('/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTest.csv') train_data = train_data[(train_data['meanpressure'] > 980) & (train_data['meanpressure'] < 1050)] data_to_show = train_data.drop('date', axis=1) show_type_1(data_to_show)
code
128033768/cell_10
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np import pandas as pd import pandas as pd class Logger: RESET = '\x1b[0m' RED = '\x1b[31m' GREEN = '\x1b[32m' def info(self, message: str): pass def error(self, message: str): pass logger = Logger() def load_data(file: str) -> pd.DataFrame: data = pd.read_csv(file) return data def show_type_1(data: pd.DataFrame) -> None: colors = ['#FF7F50', '#DDA0DD', '#66CDAA', '#BC8F8F'] def prepare_data(dataFrame: pd.DataFrame, target: str): n_cols = 1 dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(data)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) return (x_train, y_train, scaler, train_size, test_size, scaled_data) def generate_forecast(scaled_data, train_size) -> (np.array, np.array): time_steps = 60 test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test) def generate_forecast_v2(dataFrame: pd.DataFrame, target: str) -> (np.array, np.array): dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(dataset)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test, train_size) train_data = load_data('/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTrain.csv') test_data = load_data('/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTest.csv') train_data = train_data[(train_data['meanpressure'] > 980) & (train_data['meanpressure'] < 1050)] data_to_show = train_data.drop('date', axis=1) x_train, y_train, scaler, train_size, test_size, scaled_data = prepare_data(train_data, 'meantemp')
code
128033768/cell_12
[ "text_html_output_1.png" ]
from keras.layers import LSTM, Dense from keras.models import Sequential from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np import pandas as pd import pandas as pd class Logger: RESET = '\x1b[0m' RED = '\x1b[31m' GREEN = '\x1b[32m' def info(self, message: str): pass def error(self, message: str): pass logger = Logger() def load_data(file: str) -> pd.DataFrame: data = pd.read_csv(file) return data def show_type_1(data: pd.DataFrame) -> None: colors = ['#FF7F50', '#DDA0DD', '#66CDAA', '#BC8F8F'] def prepare_data(dataFrame: pd.DataFrame, target: str): n_cols = 1 dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(data)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) return (x_train, y_train, scaler, train_size, test_size, scaled_data) def generate_forecast(scaled_data, train_size) -> (np.array, np.array): time_steps = 60 test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test) def generate_forecast_v2(dataFrame: pd.DataFrame, target: str) -> (np.array, np.array): dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(dataset)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test, train_size) model = Sequential([LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1)), LSTM(units=50, return_sequences=False), Dense(units=25), Dense(units=1)]) model.compile(optimizer='adam', loss='mean_squared_error') history = model.fit(x_train, y_train, epochs=30, batch_size=32) x_test, y_test = generate_forecast(scaled_data, train_size) y_test = scaler.inverse_transform(y_test) predictions = model.predict(x_test) predictions = scaler.inverse_transform(predictions)
code
128033768/cell_5
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np import pandas as pd import pandas as pd class Logger: RESET = '\x1b[0m' RED = '\x1b[31m' GREEN = '\x1b[32m' def info(self, message: str): pass def error(self, message: str): pass logger = Logger() def load_data(file: str) -> pd.DataFrame: data = pd.read_csv(file) return data def show_type_1(data: pd.DataFrame) -> None: colors = ['#FF7F50', '#DDA0DD', '#66CDAA', '#BC8F8F'] def prepare_data(dataFrame: pd.DataFrame, target: str): n_cols = 1 dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(data)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) return (x_train, y_train, scaler, train_size, test_size, scaled_data) def generate_forecast(scaled_data, train_size) -> (np.array, np.array): time_steps = 60 test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test) def generate_forecast_v2(dataFrame: pd.DataFrame, target: str) -> (np.array, np.array): dataset = dataFrame[target] dataset = pd.DataFrame(dataset) data = dataset.values train_size = int(len(data) * 0.75) test_size = len(data) - train_size scaler = MinMaxScaler(feature_range=(0, 1)) scaled_data = scaler.fit_transform(np.array(dataset)) train_data = scaled_data[0:train_size, :] x_train = [] y_train = [] time_steps = 60 n_cols = 1 for i in range(time_steps, len(scaled_data)): x_train.append(scaled_data[i - time_steps:i, :n_cols]) y_train.append(scaled_data[i, :n_cols]) x_train, y_train = (np.array(x_train), np.array(y_train)) x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], n_cols)) test_data = scaled_data[train_size - time_steps:, :] x_test = [] y_test = [] n_cols = 1 for i in range(time_steps, len(test_data)): x_test.append(test_data[i - time_steps:i, 0:n_cols]) y_test.append(test_data[i, 0:n_cols]) x_test, y_test = (np.array(x_test), np.array(y_test)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], n_cols)) return (x_test, y_test, train_size) train_data = load_data('/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTrain.csv') test_data = load_data('/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTest.csv') train_data.head()
code
72094126/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/heart-disease-uci/heart.csv')
data.info()
code
72094126/cell_6
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier

tree = RandomForestClassifier()
tree.fit(xtrain, ytrain)
ypred = tree.predict(xtest)
print('Prediction Accuracy', metrics.accuracy_score(ytest, ypred))
code
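The cell above trains on `xtrain`/`ytrain` and evaluates on `xtest`/`ytest`, none of which are defined in the cells shown for this notebook. A minimal sketch of the split it presumably relies on; the `target` column name, the 80/20 split, and the random seed are assumptions rather than something taken from the notebook:

    import pandas as pd
    from sklearn.model_selection import train_test_split

    data = pd.read_csv('/kaggle/input/heart-disease-uci/heart.csv')
    # Assumed label column: 'target'.
    x = data.drop(columns='target')
    y = data['target']
    xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.2, random_state=0)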
72094126/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72094126/cell_7
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/heart-disease-uci/heart.csv')

tree = RandomForestClassifier()
tree.fit(xtrain, ytrain)
ypred = tree.predict(xtest)
pd.DataFrame({'predicted': ypred, 'actual': ytest})
code
72094126/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/heart-disease-uci/heart.csv')
data.head()
code
122263483/cell_6
[ "text_plain_output_100.png", "text_plain_output_334.png", "text_plain_output_445.png", "text_plain_output_201.png", "text_plain_output_586.png", "text_plain_output_261.png", "text_plain_output_565.png", "text_plain_output_522.png", "text_plain_output_84.png", "text_plain_output_521.png", "text_plain_output_322.png", "text_plain_output_205.png", "text_plain_output_511.png", "text_plain_output_608.png", "text_plain_output_271.png", "text_plain_output_56.png", "text_plain_output_475.png", "text_plain_output_158.png", "text_plain_output_455.png", "text_plain_output_223.png", "text_plain_output_218.png", "text_plain_output_264.png", "text_plain_output_282.png", "text_plain_output_579.png", "text_plain_output_396.png", "text_plain_output_287.png", "text_plain_output_232.png", "text_plain_output_181.png", "text_plain_output_137.png", "text_plain_output_139.png", "text_plain_output_362.png", "text_plain_output_35.png", "text_plain_output_501.png", "text_plain_output_593.png", "text_plain_output_258.png", "text_plain_output_452.png", "text_plain_output_130.png", "text_plain_output_598.png", "text_plain_output_490.png", "text_plain_output_449.png", "text_plain_output_462.png", "text_plain_output_117.png", "text_plain_output_286.png", "text_plain_output_367.png", "text_plain_output_262.png", "text_plain_output_278.png", "text_plain_output_588.png", "text_plain_output_395.png", "text_plain_output_254.png", "text_plain_output_307.png", "text_plain_output_570.png", "text_plain_output_98.png", "text_plain_output_399.png", "text_plain_output_236.png", "text_plain_output_195.png", "text_plain_output_471.png", "text_plain_output_219.png", "text_plain_output_614.png", "text_plain_output_420.png", "text_plain_output_514.png", "text_plain_output_485.png", "text_plain_output_237.png", "text_plain_output_43.png", "text_plain_output_284.png", "text_plain_output_187.png", "text_plain_output_309.png", "text_plain_output_576.png", "text_plain_output_78.png", "text_plain_output_143.png", "text_plain_output_106.png", "text_plain_output_37.png", "text_plain_output_138.png", "text_plain_output_544.png", "text_plain_output_192.png", "text_plain_output_426.png", "text_plain_output_184.png", "text_plain_output_477.png", "text_plain_output_274.png", "text_plain_output_172.png", "text_plain_output_613.png", "text_plain_output_332.png", "text_plain_output_147.png", "text_plain_output_443.png", "text_plain_output_327.png", "text_plain_output_256.png", "text_plain_output_90.png", "text_plain_output_79.png", "text_plain_output_331.png", "text_plain_output_5.png", "text_plain_output_550.png", "text_plain_output_75.png", "text_plain_output_48.png", "text_plain_output_388.png", "text_plain_output_422.png", "text_plain_output_116.png", "text_plain_output_128.png", "text_plain_output_30.png", "text_plain_output_167.png", "text_plain_output_213.png", "text_plain_output_73.png", "text_plain_output_126.png", "text_plain_output_492.png", "text_plain_output_321.png", "text_plain_output_272.png", "text_plain_output_115.png", "text_plain_output_474.png", "text_plain_output_407.png", "text_plain_output_482.png", "text_plain_output_316.png", "text_plain_output_355.png", "text_plain_output_15.png", "text_plain_output_390.png", "text_plain_output_133.png", "text_plain_output_437.png", "text_plain_output_198.png", "text_plain_output_387.png", "text_plain_output_555.png", "text_plain_output_548.png", "text_plain_output_178.png", "text_plain_output_226.png", "text_plain_output_154.png", "text_plain_output_234.png", "text_plain_output_375.png", 
"text_plain_output_404.png", "text_plain_output_114.png", "text_plain_output_515.png", "text_plain_output_157.png", "text_plain_output_494.png", "text_plain_output_317.png", "text_plain_output_251.png", "text_plain_output_470.png", "text_plain_output_496.png", "text_plain_output_423.png", "text_plain_output_70.png", "text_plain_output_9.png", "text_plain_output_484.png", "text_plain_output_44.png", "text_plain_output_325.png", "text_plain_output_203.png", "text_plain_output_505.png", "text_plain_output_603.png", "text_plain_output_119.png", "text_plain_output_546.png", "text_plain_output_540.png", "text_plain_output_373.png", "text_plain_output_504.png", "text_plain_output_86.png", "text_plain_output_244.png", "text_plain_output_118.png", "text_plain_output_551.png", "text_plain_output_583.png", "text_plain_output_131.png", "text_plain_output_40.png", "text_plain_output_343.png", "text_plain_output_123.png", "text_plain_output_74.png", "text_plain_output_190.png", "text_plain_output_302.png", "text_plain_output_604.png", "text_plain_output_31.png", "text_plain_output_340.png", "text_plain_output_379.png", "text_plain_output_281.png", "text_plain_output_20.png", "text_plain_output_557.png", "text_plain_output_273.png", "text_plain_output_263.png", "text_plain_output_102.png", "text_plain_output_229.png", "text_plain_output_111.png", "text_plain_output_414.png", "text_plain_output_461.png", "text_plain_output_510.png", "text_plain_output_222.png", "text_plain_output_589.png", "text_plain_output_101.png", "text_plain_output_530.png", "text_plain_output_169.png", "text_plain_output_531.png", "text_plain_output_144.png", "text_plain_output_161.png", "text_plain_output_489.png", "text_plain_output_305.png", "text_plain_output_275.png", "text_plain_output_301.png", "text_plain_output_132.png", "text_plain_output_60.png", "text_plain_output_467.png", "text_plain_output_502.png", "text_plain_output_221.png", "text_plain_output_596.png", "text_plain_output_564.png", "text_plain_output_552.png", "text_plain_output_330.png", "text_plain_output_155.png", "text_plain_output_434.png", "text_plain_output_68.png", "text_plain_output_4.png", "text_plain_output_65.png", "text_plain_output_64.png", "text_plain_output_419.png", "text_plain_output_215.png", "text_plain_output_532.png", "text_plain_output_189.png", "text_plain_output_415.png", "text_plain_output_13.png", "text_plain_output_200.png", "text_plain_output_107.png", "text_plain_output_567.png", "text_plain_output_398.png", "text_plain_output_312.png", "text_plain_output_248.png", "text_plain_output_318.png", "text_plain_output_417.png", "text_plain_output_52.png", "text_plain_output_545.png", "text_plain_output_393.png", "text_plain_output_572.png", "text_plain_output_594.png", "text_plain_output_66.png", "text_plain_output_446.png", "text_plain_output_243.png", "text_plain_output_611.png", "text_plain_output_45.png", "text_plain_output_380.png", "text_plain_output_599.png", "text_plain_output_442.png", "text_plain_output_300.png", "text_plain_output_257.png", "text_plain_output_405.png", "text_plain_output_353.png", "text_plain_output_476.png", "text_plain_output_277.png", "text_plain_output_457.png", "text_plain_output_361.png", "text_plain_output_171.png", "text_plain_output_518.png", "text_plain_output_561.png", "text_plain_output_431.png", "text_plain_output_14.png", "text_plain_output_159.png", "text_plain_output_32.png", "text_plain_output_516.png", "text_plain_output_304.png", "text_plain_output_88.png", "text_plain_output_240.png", 
"text_plain_output_29.png", "text_plain_output_359.png", "text_plain_output_529.png", "text_plain_output_347.png", "text_plain_output_140.png", "text_plain_output_606.png", "text_plain_output_376.png", "text_plain_output_280.png", "text_plain_output_129.png", "text_plain_output_349.png", "text_plain_output_242.png", "text_plain_output_483.png", "text_plain_output_460.png", "text_plain_output_363.png", "text_plain_output_289.png", "text_plain_output_255.png", "text_plain_output_160.png", "text_plain_output_58.png", "text_plain_output_329.png", "text_plain_output_49.png", "text_plain_output_63.png", "text_plain_output_260.png", "text_plain_output_294.png", "text_plain_output_27.png", "text_plain_output_392.png", "text_plain_output_320.png", "text_plain_output_177.png", "text_plain_output_607.png", "text_plain_output_386.png", "text_plain_output_438.png", "text_plain_output_76.png", "text_plain_output_333.png", "text_plain_output_108.png", "text_plain_output_581.png", "text_plain_output_54.png", "text_plain_output_142.png", "text_plain_output_10.png", "text_plain_output_269.png", "text_plain_output_276.png", "text_plain_output_6.png", "text_plain_output_326.png", "text_plain_output_503.png", "text_plain_output_578.png", "text_plain_output_153.png", "text_plain_output_170.png", "text_plain_output_92.png", "text_plain_output_57.png", "text_plain_output_120.png", "text_plain_output_469.png", "text_plain_output_24.png", "text_plain_output_357.png", "text_plain_output_21.png", "text_plain_output_344.png", "text_plain_output_104.png", "text_plain_output_270.png", "text_plain_output_47.png", "text_plain_output_466.png", "text_plain_output_568.png", "text_plain_output_121.png", "text_plain_output_25.png", "text_plain_output_134.png", "text_plain_output_523.png", "text_plain_output_401.png", "text_plain_output_77.png", "text_plain_output_421.png", "text_plain_output_288.png", "text_plain_output_535.png", "text_plain_output_527.png", "text_plain_output_488.png", "text_plain_output_18.png", "text_plain_output_183.png", "text_plain_output_266.png", "text_plain_output_149.png", "text_plain_output_208.png", "text_plain_output_50.png", "text_plain_output_36.png", "text_plain_output_383.png", "text_plain_output_207.png", "text_plain_output_391.png", "text_plain_output_413.png", "text_plain_output_96.png", "text_plain_output_87.png", "text_plain_output_3.png", "text_plain_output_217.png", "text_plain_output_418.png", "text_plain_output_427.png", "text_plain_output_180.png", "text_plain_output_556.png", "text_plain_output_141.png", "text_plain_output_210.png", "text_plain_output_112.png", "text_plain_output_152.png", "text_plain_output_225.png", "text_plain_output_191.png", "text_plain_output_609.png", "text_plain_output_259.png", "text_plain_output_447.png", "text_plain_output_290.png", "text_plain_output_506.png", "text_plain_output_283.png", "text_plain_output_495.png", "text_plain_output_247.png", "text_plain_output_113.png", "text_plain_output_371.png", "text_plain_output_479.png", "text_plain_output_324.png", "text_plain_output_22.png", "text_plain_output_188.png", "text_plain_output_366.png", "text_plain_output_328.png", "text_plain_output_81.png", "text_plain_output_69.png", "text_plain_output_368.png", "text_plain_output_372.png", "text_plain_output_175.png", "text_plain_output_165.png", "text_plain_output_542.png", "text_plain_output_146.png", "text_plain_output_145.png", "text_plain_output_125.png", "text_plain_output_454.png", "text_plain_output_487.png", "text_plain_output_595.png", 
"text_plain_output_338.png", "text_plain_output_575.png", "text_plain_output_197.png", "text_plain_output_512.png", "text_plain_output_382.png", "text_plain_output_315.png", "text_plain_output_429.png", "text_plain_output_38.png", "text_plain_output_517.png", "text_plain_output_433.png", "text_plain_output_7.png", "text_plain_output_528.png", "text_plain_output_214.png", "text_plain_output_166.png", "text_plain_output_358.png", "text_plain_output_513.png", "text_plain_output_314.png", "text_plain_output_592.png", "text_plain_output_410.png", "text_plain_output_432.png", "text_plain_output_411.png", "text_plain_output_91.png", "text_plain_output_308.png", "text_plain_output_245.png", "text_plain_output_16.png", "text_plain_output_497.png", "text_plain_output_174.png", "text_plain_output_212.png", "text_plain_output_230.png", "text_plain_output_265.png", "text_plain_output_430.png", "text_plain_output_435.png", "text_plain_output_378.png", "text_plain_output_59.png", "text_plain_output_580.png", "text_plain_output_409.png", "text_plain_output_206.png", "text_plain_output_103.png", "text_plain_output_71.png", "text_plain_output_539.png", "text_plain_output_8.png", "text_plain_output_122.png", "text_plain_output_384.png", "text_plain_output_498.png", "text_plain_output_211.png", "text_plain_output_182.png", "text_plain_output_26.png", "text_plain_output_601.png", "text_plain_output_554.png", "text_plain_output_536.png", "text_plain_output_406.png", "text_plain_output_310.png", "text_plain_output_456.png", "text_plain_output_541.png", "text_plain_output_558.png", "text_plain_output_220.png", "text_plain_output_543.png", "text_plain_output_451.png", "text_plain_output_109.png", "text_plain_output_459.png", "text_plain_output_238.png", "text_plain_output_520.png", "text_plain_output_615.png", "text_plain_output_41.png", "text_plain_output_34.png", "text_plain_output_612.png", "text_plain_output_253.png", "text_plain_output_346.png", "text_plain_output_291.png", "text_plain_output_168.png", "text_plain_output_394.png", "text_plain_output_204.png", "text_plain_output_241.png", "text_plain_output_231.png", "text_plain_output_533.png", "text_plain_output_345.png", "text_plain_output_350.png", "text_plain_output_209.png", "text_plain_output_185.png", "text_plain_output_85.png", "text_plain_output_42.png", "text_plain_output_110.png", "text_plain_output_605.png", "text_plain_output_549.png", "text_plain_output_67.png", "text_plain_output_508.png", "text_plain_output_573.png", "text_plain_output_468.png", "text_plain_output_370.png", "text_plain_output_297.png", "text_plain_output_53.png", "text_plain_output_313.png", "text_plain_output_224.png", "text_plain_output_193.png", "text_plain_output_441.png", "text_plain_output_403.png", "text_plain_output_23.png", "text_plain_output_610.png", "text_plain_output_173.png", "text_plain_output_235.png", "text_plain_output_151.png", "text_plain_output_89.png", "text_plain_output_299.png", "text_plain_output_51.png", "text_plain_output_450.png", "text_plain_output_252.png", "text_plain_output_296.png", "text_plain_output_525.png", "text_plain_output_28.png", "text_plain_output_72.png", "text_plain_output_99.png", "text_plain_output_381.png", "text_plain_output_571.png", "text_plain_output_163.png", "text_plain_output_179.png", "text_plain_output_537.png", "text_plain_output_162.png", "text_plain_output_136.png", "text_plain_output_602.png", "text_plain_output_246.png", "text_plain_output_2.png", "text_plain_output_569.png", "text_plain_output_239.png", 
"text_plain_output_127.png", "text_plain_output_559.png", "text_plain_output_311.png", "text_plain_output_500.png", "text_plain_output_295.png", "text_plain_output_279.png", "text_plain_output_507.png", "text_plain_output_590.png", "text_plain_output_509.png", "text_plain_output_337.png", "text_plain_output_562.png", "text_plain_output_499.png", "text_plain_output_196.png", "text_plain_output_342.png", "text_plain_output_563.png", "text_plain_output_97.png", "text_plain_output_227.png", "text_plain_output_453.png", "text_plain_output_1.png", "text_plain_output_33.png", "text_plain_output_150.png", "text_plain_output_39.png", "text_plain_output_176.png", "text_plain_output_584.png", "text_plain_output_335.png", "text_plain_output_186.png", "text_plain_output_233.png", "text_plain_output_228.png", "text_plain_output_473.png", "text_plain_output_385.png", "text_plain_output_478.png", "text_plain_output_55.png", "text_plain_output_412.png", "text_plain_output_293.png", "text_plain_output_268.png", "text_plain_output_436.png", "text_plain_output_199.png", "text_plain_output_354.png", "text_plain_output_463.png", "text_plain_output_360.png", "text_plain_output_319.png", "text_plain_output_82.png", "text_plain_output_356.png", "text_plain_output_202.png", "text_plain_output_93.png", "text_plain_output_336.png", "text_plain_output_19.png", "text_plain_output_439.png", "text_plain_output_341.png", "text_plain_output_105.png", "text_plain_output_465.png", "text_plain_output_80.png", "text_plain_output_491.png", "text_plain_output_94.png", "text_plain_output_164.png", "text_plain_output_249.png", "text_plain_output_534.png", "text_plain_output_444.png", "text_plain_output_216.png", "text_plain_output_124.png", "text_plain_output_17.png", "text_plain_output_148.png", "text_plain_output_323.png", "text_plain_output_402.png", "text_plain_output_424.png", "text_plain_output_486.png", "text_plain_output_597.png", "text_plain_output_250.png", "text_plain_output_11.png", "text_plain_output_481.png", "text_plain_output_560.png", "text_plain_output_526.png", "text_plain_output_400.png", "text_plain_output_524.png", "text_plain_output_538.png", "text_plain_output_12.png", "text_plain_output_267.png", "text_plain_output_553.png", "text_plain_output_408.png", "text_plain_output_425.png", "text_plain_output_591.png", "text_plain_output_428.png", "text_plain_output_416.png", "text_plain_output_194.png", "text_plain_output_577.png", "text_plain_output_519.png", "text_plain_output_62.png", "text_plain_output_480.png", "text_plain_output_303.png", "text_plain_output_377.png", "text_plain_output_440.png", "text_plain_output_95.png", "text_plain_output_339.png", "text_plain_output_458.png", "text_plain_output_464.png", "text_plain_output_156.png", "text_plain_output_547.png", "text_plain_output_298.png", "text_plain_output_369.png", "text_plain_output_348.png", "text_plain_output_587.png", "text_plain_output_448.png", "text_plain_output_365.png", "text_plain_output_61.png", "text_plain_output_585.png", "text_plain_output_352.png", "text_plain_output_83.png", "text_plain_output_374.png", "text_plain_output_472.png", "text_plain_output_566.png", "text_plain_output_397.png", "text_plain_output_600.png", "text_plain_output_389.png", "text_plain_output_292.png", "text_plain_output_351.png", "text_plain_output_135.png", "text_plain_output_285.png", "text_plain_output_574.png", "text_plain_output_582.png", "text_plain_output_306.png", "text_plain_output_493.png", "text_plain_output_46.png" ]
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
df = pd.read_parquet('/kaggle/input/sampled-datasets-v2/NF-UNSW-NB15-V2.parquet')
columns_to_remove = ['L4_SRC_PORT', 'L4_DST_PORT', 'Label', 'Attack']
train, test = train_test_split(df, test_size=0.2, shuffle=True)
y_test = np.array(test['Label'], dtype=np.uint0)
train_benign_idx = train['Label'] == 0
train_attack_idx = train['Label'] == 1
train.drop(columns=columns_to_remove, axis=1, inplace=True)
test.drop(columns=columns_to_remove, axis=1, inplace=True)
train_normal = train[train_benign_idx].values
train_attack = train[train_attack_idx].values
scaler = MinMaxScaler()
train = scaler.fit_transform(train_normal)
train_attack = scaler.transform(train_attack)
train, validation = train_test_split(train, test_size=0.2)

"""
Function to test a model
========================
- threshold_quantile: IF reconstruction loss > specified quantile of validation losses => attack! ELSE => benign!
- validation_benign: benign samples used for validation (and threshold determination)
- validation_attack: attack samples used for validation
- test: test data (both benign and attack samples)
- y_test: ground truth of the test data
- mae: IF true => loss function equals Mean Absolute Error ELSE loss function equals Mean Squared Error
"""
# (a standalone sketch of this quantile-threshold check follows this record)
def test_model(model, threshold_quantile, validation_benign, validation_attack, test, y_test, mae=True):
    # Reconstruction errors of the benign validation samples define the detection threshold.
    val_losses = None
    if mae:
        val_losses = np.mean(abs(validation_benign - model.predict(validation_benign)), axis=1)
    else:
        val_losses = np.mean((validation_benign - model.predict(validation_benign)) ** 2, axis=1)
    val_losses = pd.DataFrame({'benign': val_losses})
    # Reconstruction errors of known attack samples, kept for comparison.
    attack_losses = None
    if mae:
        attack_losses = np.mean(abs(validation_attack - model.predict(validation_attack)), axis=1)
    else:
        attack_losses = np.mean((validation_attack - model.predict(validation_attack)) ** 2, axis=1)
    attack_losses = pd.DataFrame({'attack': attack_losses})
    # Threshold = requested quantile of the benign validation losses.
    threshold = np.quantile(val_losses, threshold_quantile)
    # Flag every test sample whose reconstruction error exceeds the threshold as an attack.
    test_losses = None
    recons = model.predict(test)
    if mae:
        test_losses = np.mean(abs(test - recons), axis=1)
    else:
        test_losses = np.mean((test - recons) ** 2, axis=1)
    preds = np.array(test_losses > threshold, dtype=np.uint8)
    tn, fp, fn, tp = confusion_matrix(y_test, preds).ravel()
    print(f'\t\tTN: {tn}, FP: {fp}, FN: {fn}, TP: {tp}')
    print(f'\t\tAccuracy: {accuracy_score(y_test, preds):.4f}, Precision: {precision_score(y_test, preds):.4f}, Recall: {recall_score(y_test, preds):.4f}')

# Symmetric autoencoder trained on benign traffic only, reconstruction loss = MAE.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(32, activation='relu'))
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(8, activation='relu'))
model.add(tf.keras.layers.Dense(4, activation='relu'))
model.add(tf.keras.layers.Dense(8, activation='relu'))
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(32, activation='relu'))
model.add(tf.keras.layers.Dense(39, activation='sigmoid'))
model.compile(optimizer='adam', loss='mae')
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
model.fit(train, train, batch_size=128, epochs=50, validation_split=0.1, shuffle=True, callbacks=[es])
print("TRAINED WITH LOSS 'MAE':")
print('=' * 20)
print('\tEVALUATE WITH MAE & QUANTILE 0.95:')
test_model(model, 0.95, validation, train_attack, test, y_test)
print('\tEVALUATE WITH MAE & QUANTILE 0.98:')
test_model(model, 0.98, validation, train_attack, test, y_test)
print('\tEVALUATE WITH MSE & QUANTILE 0.95:')
test_model(model, 0.95, validation, train_attack, test, y_test, mae=False)
print('\tEVALUATE WITH MSE & QUANTILE 0.98:')
test_model(model, 0.98, validation, train_attack, test, y_test, mae=False)
# Same architecture retrained with MSE reconstruction loss.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(32, activation='relu'))
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(8, activation='relu'))
model.add(tf.keras.layers.Dense(4, activation='relu'))
model.add(tf.keras.layers.Dense(8, activation='relu'))
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(32, activation='relu'))
model.add(tf.keras.layers.Dense(39, activation='sigmoid'))
model.compile(optimizer='adam', loss='mse')
model.fit(train, train, batch_size=128, epochs=50, validation_split=0.1, shuffle=True, callbacks=[es])
print("TRAINED WITH LOSS 'MSE':")
print('=' * 20)
print('\tEVALUATE WITH MAE & QUANTILE 0.95:')
test_model(model, 0.95, validation, train_attack, test, y_test)
print('\tEVALUATE WITH MAE & QUANTILE 0.98:')
test_model(model, 0.98, validation, train_attack, test, y_test)
print('\tEVALUATE WITH MSE & QUANTILE 0.95:')
test_model(model, 0.95, validation, train_attack, test, y_test, mae=False)
print('\tEVALUATE WITH MSE & QUANTILE 0.98:')
test_model(model, 0.98, validation, train_attack, test, y_test, mae=False)
code
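A minimal standalone sketch of the quantile-threshold rule used by test_model in the record above. It assumes synthetic per-sample reconstruction errors in place of the NF-UNSW-NB15 autoencoder outputs; every name and number here is an illustrative assumption, not part of the original notebook.

import numpy as np

# Synthetic stand-ins for per-sample reconstruction errors (illustrative only).
rng = np.random.default_rng(0)
benign_val_errors = rng.exponential(scale=0.02, size=1000)                 # benign validation traffic
test_errors = np.concatenate([rng.exponential(scale=0.02, size=800),       # benign-like test traffic
                              rng.exponential(scale=0.15, size=200)])      # attack-like test traffic
y_true = np.concatenate([np.zeros(800, dtype=int), np.ones(200, dtype=int)])

# Threshold = a high quantile (here 0.98) of the benign validation errors.
threshold = np.quantile(benign_val_errors, 0.98)
preds = (test_errors > threshold).astype(int)   # error above threshold => flagged as attack

tp = int(((preds == 1) & (y_true == 1)).sum())
fp = int(((preds == 1) & (y_true == 0)).sum())
print(f'threshold={threshold:.4f}  flagged={preds.sum()}  TP={tp}  FP={fp}')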
122263483/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix
import tensorflow as tf
import keras_tuner as kt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122263483/cell_3
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_parquet('/kaggle/input/sampled-datasets-v2/NF-UNSW-NB15-V2.parquet')
columns_to_remove = ['L4_SRC_PORT', 'L4_DST_PORT', 'Label', 'Attack']
train, test = train_test_split(df, test_size=0.2, shuffle=True)
y_test = np.array(test['Label'], dtype=np.uint0)
train_benign_idx = train['Label'] == 0
train_attack_idx = train['Label'] == 1
train.drop(columns=columns_to_remove, axis=1, inplace=True)
test.drop(columns=columns_to_remove, axis=1, inplace=True)
train_normal = train[train_benign_idx].values
train_attack = train[train_attack_idx].values
scaler = MinMaxScaler()
train = scaler.fit_transform(train_normal)
train_attack = scaler.transform(train_attack)
train, validation = train_test_split(train, test_size=0.2)
print(f'Shape train data: {train.shape}')
print(f'Shape validation data: {validation.shape}')
print(f'Shape test data: {test.shape}')
code
33111267/cell_21
[ "text_plain_output_1.png" ]
from nltk import word_tokenize
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.cluster.hierarchy import fcluster
from sklearn.manifold import TSNE
from unidecode import unidecode
import bs4
import matplotlib.pyplot as plt
import nltk
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import string
import tensorflow_hub as hub
import unicodedata
pd.options.display.max_colwidth = 255
df = pd.read_csv('../input/airline-sentiment/Tweets.csv')
module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4'
model = hub.load(module_url)

def embed(input):
    return model(input)
import seaborn as sns
df_neg = df.query("airline_sentiment == 'negative'").head(1000).copy()
df_neu = df.query("airline_sentiment == 'neutral'").head(1000).copy()
df_pos = df.query("airline_sentiment == 'positive'").head(1000).copy()
df_balanced = pd.concat([df_neg, df_neu, df_pos], ignore_index=True)
df_balanced = df_balanced.sample(frac=1).reset_index(drop=True)
comments = df_balanced['text'].copy()

def limpa_dataframe(data, column, hmtl='s', emoji='s', punct='s', lower='s', stopw='s', lng='english', token='s', stm='s', lmz='s'):
    import nltk
    from nltk import word_tokenize
    from nltk.stem.porter import PorterStemmer
    from nltk.stem import WordNetLemmatizer
    import bs4
    import string
    wn = nltk.WordNetLemmatizer()
    ps = nltk.PorterStemmer()
    stopword = nltk.corpus.stopwords.words(lng)
    if hmtl == 's':
        data[column] = data[column].apply(lambda x: bs4.BeautifulSoup(x, 'lxml').get_text())

    def deEmojify(inputString):
        import unicodedata
        from unidecode import unidecode
        returnString = ''
        for character in inputString:
            try:
                character.encode('ascii')
                returnString += character
            except UnicodeEncodeError:
                replaced = unidecode(str(character))
                if replaced != '':
                    returnString += replaced
                else:
                    try:
                        returnString += '[' + unicodedata.name(character) + ']'
                    except ValueError:
                        returnString += '[x]'
        return returnString
    if emoji == 's':
        data[column] = data[column].apply(lambda x: deEmojify(x))

    def remove_punct(text):
        text_nopunct = ''.join([char for char in text if char not in string.punctuation])
        return text_nopunct
    if punct == 's':
        data[column] = data[column].apply(lambda x: remove_punct(x))
    if lower == 's':
        data[column] = [token.lower() for token in data[column]]
    if token == 's':
        data[column] = [word_tokenize(word) for word in data[column]]

    def remove_stopwords(tokenized_list):
        text = [word for word in tokenized_list if word not in stopword]
        return text
    if stopw == 's':
        data[column] = data[column].apply(lambda x: remove_stopwords(x))

    def stemming(tokenized_text):
        text = [ps.stem(word) for word in tokenized_text]
        return text
    if stm == 's':
        data[column] = data[column].apply(lambda x: stemming(x))

    def lemmatizing(tokenized_text):
        text = [wn.lemmatize(word) for word in tokenized_text]
        return text
    if lmz == 's':
        data[column] = data[column].apply(lambda x: lemmatizing(x))
    data[column] = [' '.join(word) for word in data[column]]
    return data
df_clean = df_balanced.copy()
df_clean = limpa_dataframe(df_clean, column='text', hmtl='s', emoji='s', punct='s', stopw='s', lng='english', token='s', stm='s', lmz='s')
messages = df_clean['text']
message_embeddings = embed(messages)
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, perplexity=5)
tsne_result = tsne.fit_transform(message_embeddings)
tsne_df = pd.DataFrame({'X': tsne_result[:, 0], 'Y': tsne_result[:, 1]})
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.cluster.hierarchy import cophenet
from scipy.cluster.hierarchy import fcluster
X = tsne_result
Z = linkage(X, 'average', 'cosine')
threshold = 1 - 0.9  # cosine-similarity cutoff of 0.9 expressed as a cosine-distance threshold (see the sketch after this record)
c = fcluster(Z, threshold, criterion='distance')
myset = set(c.tolist())
my_list = list(set(myset))
plt.title('Hierarchical Clustering Dendrogram (truncated)')
plt.xlabel('sample index')
plt.ylabel('distance')
dendrogram(Z, truncate_mode='lastp', p=100, show_leaf_counts=False, leaf_rotation=90.0, leaf_font_size=12.0, show_contracted=True)
plt.show()
code
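A minimal synthetic sketch of the clustering step in the record above: average-linkage clustering on cosine distance, where a cosine-similarity cutoff of 0.9 becomes a cosine-distance threshold of 1 - 0.9 for fcluster. The toy 2-D points stand in for the t-SNE output and are assumptions for illustration, not data from the notebook.

import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

# Toy 2-D points standing in for the t-SNE embedding (illustrative only).
rng = np.random.default_rng(1)
X = np.vstack([rng.normal(loc=[0.0, 5.0], scale=0.2, size=(20, 2)),
               rng.normal(loc=[5.0, 0.0], scale=0.2, size=(20, 2))])

# Average linkage on cosine distance; similarity >= 0.9 corresponds to distance <= 1 - 0.9.
Z = linkage(X, method='average', metric='cosine')
clusters = fcluster(Z, t=1 - 0.9, criterion='distance')
print('clusters found:', len(set(clusters.tolist())))   # expected: 2 for these two directions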
33111267/cell_13
[ "text_plain_output_1.png" ]
from nltk import word_tokenize from unidecode import unidecode import bs4 import nltk import pandas as pd import pandas as pd import seaborn as sns import seaborn as sns import string import unicodedata pd.options.display.max_colwidth = 255 df = pd.read_csv('../input/airline-sentiment/Tweets.csv') import seaborn as sns df_neg = df.query("airline_sentiment == 'negative'").head(1000).copy() df_neu = df.query("airline_sentiment == 'neutral'").head(1000).copy() df_pos = df.query("airline_sentiment == 'positive'").head(1000).copy() df_balanced = pd.concat([df_neg, df_neu, df_pos], ignore_index=True) df_balanced = df_balanced.sample(frac=1).reset_index(drop=True) comments = df_balanced['text'].copy() def limpa_dataframe(data, column, hmtl='s', emoji='s', punct='s', lower='s', stopw='s', lng='english', token='s', stm='s', lmz='s'): import nltk from nltk import word_tokenize from nltk.stem.porter import PorterStemmer from nltk.stem import WordNetLemmatizer import bs4 import string wn = nltk.WordNetLemmatizer() ps = nltk.PorterStemmer() stopword = nltk.corpus.stopwords.words(lng) if hmtl == 's': data[column] = data[column].apply(lambda x: bs4.BeautifulSoup(x, 'lxml').get_text()) def deEmojify(inputString): import unicodedata from unidecode import unidecode returnString = '' for character in inputString: try: character.encode('ascii') returnString += character except UnicodeEncodeError: replaced = unidecode(str(character)) if replaced != '': returnString += replaced else: try: returnString += '[' + unicodedata.name(character) + ']' except ValueError: returnString += '[x]' return returnString if emoji == 's': data[column] = data[column].apply(lambda x: deEmojify(x)) def remove_punct(text): text_nopunct = ''.join([char for char in text if char not in string.punctuation]) return text_nopunct if punct == 's': data[column] = data[column].apply(lambda x: remove_punct(x)) if lower == 's': data[column] = [token.lower() for token in data[column]] if token == 's': data[column] = [word_tokenize(word) for word in data[column]] def remove_stopwords(tokenized_list): text = [word for word in tokenized_list if word not in stopword] return text if stopw == 's': data[column] = data[column].apply(lambda x: remove_stopwords(x)) def stemming(tokenized_text): text = [ps.stem(word) for word in tokenized_text] return text if stm == 's': data[column] = data[column].apply(lambda x: stemming(x)) def lemmatizing(tokenized_text): text = [wn.lemmatize(word) for word in tokenized_text] return text if lmz == 's': data[column] = data[column].apply(lambda x: lemmatizing(x)) data[column] = [' '.join(word) for word in data[column]] return data df_clean = df_balanced.copy() df_clean = limpa_dataframe(df_clean, column='text', hmtl='s', emoji='s', punct='s', stopw='s', lng='english', token='s', stm='s', lmz='s') df_clean['text'].head(30)
code
33111267/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
pd.options.display.max_colwidth = 255
df = pd.read_csv('../input/airline-sentiment/Tweets.csv')
df['airline_sentiment'].value_counts()
code
33111267/cell_25
[ "text_plain_output_1.png" ]
from nltk import word_tokenize from scipy.cluster.hierarchy import dendrogram, linkage from scipy.cluster.hierarchy import fcluster from sklearn.cluster import KMeans from sklearn.manifold import TSNE from sklearn.mixture import GaussianMixture from sklearn.neighbors import NearestNeighbors from unidecode import unidecode import bs4 import matplotlib.pyplot as plt import nltk import numpy as np import numpy as np import numpy as np import pandas as pd import pandas as pd import seaborn as sns import seaborn as sns import seaborn as sns import string import tensorflow_hub as hub import unicodedata pd.options.display.max_colwidth = 255 df = pd.read_csv('../input/airline-sentiment/Tweets.csv') comments = df['text'] module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4' model = hub.load(module_url) def embed(input): return model(input) import seaborn as sns df_neg = df.query("airline_sentiment == 'negative'").head(1000).copy() df_neu = df.query("airline_sentiment == 'neutral'").head(1000).copy() df_pos = df.query("airline_sentiment == 'positive'").head(1000).copy() df_balanced = pd.concat([df_neg, df_neu, df_pos], ignore_index=True) df_balanced = df_balanced.sample(frac=1).reset_index(drop=True) comments = df_balanced['text'].copy() def limpa_dataframe(data, column, hmtl='s', emoji='s', punct='s', lower='s', stopw='s', lng='english', token='s', stm='s', lmz='s'): import nltk from nltk import word_tokenize from nltk.stem.porter import PorterStemmer from nltk.stem import WordNetLemmatizer import bs4 import string wn = nltk.WordNetLemmatizer() ps = nltk.PorterStemmer() stopword = nltk.corpus.stopwords.words(lng) if hmtl == 's': data[column] = data[column].apply(lambda x: bs4.BeautifulSoup(x, 'lxml').get_text()) def deEmojify(inputString): import unicodedata from unidecode import unidecode returnString = '' for character in inputString: try: character.encode('ascii') returnString += character except UnicodeEncodeError: replaced = unidecode(str(character)) if replaced != '': returnString += replaced else: try: returnString += '[' + unicodedata.name(character) + ']' except ValueError: returnString += '[x]' return returnString if emoji == 's': data[column] = data[column].apply(lambda x: deEmojify(x)) def remove_punct(text): text_nopunct = ''.join([char for char in text if char not in string.punctuation]) return text_nopunct if punct == 's': data[column] = data[column].apply(lambda x: remove_punct(x)) if lower == 's': data[column] = [token.lower() for token in data[column]] if token == 's': data[column] = [word_tokenize(word) for word in data[column]] def remove_stopwords(tokenized_list): text = [word for word in tokenized_list if word not in stopword] return text if stopw == 's': data[column] = data[column].apply(lambda x: remove_stopwords(x)) def stemming(tokenized_text): text = [ps.stem(word) for word in tokenized_text] return text if stm == 's': data[column] = data[column].apply(lambda x: stemming(x)) def lemmatizing(tokenized_text): text = [wn.lemmatize(word) for word in tokenized_text] return text if lmz == 's': data[column] = data[column].apply(lambda x: lemmatizing(x)) data[column] = [' '.join(word) for word in data[column]] return data df_clean = df_balanced.copy() df_clean = limpa_dataframe(df_clean, column='text', hmtl='s', emoji='s', punct='s', stopw='s', lng='english', token='s', stm='s', lmz='s') messages = df_clean['text'] message_embeddings = embed(messages) from sklearn.manifold import TSNE tsne = TSNE(n_components=2, perplexity=5) tsne_result = 
tsne.fit_transform(message_embeddings) tsne_df = pd.DataFrame({'X': tsne_result[:, 0], 'Y': tsne_result[:, 1]}) from scipy.spatial.distance import pdist from scipy.cluster.hierarchy import dendrogram, linkage from scipy.cluster.hierarchy import cophenet from scipy.cluster.hierarchy import fcluster X = tsne_result Z = linkage(X, 'average', 'cosine') threshold = 1 - 0.9 c = fcluster(Z, threshold, criterion='distance') myset = set(c.tolist()) my_list = list(set(myset)) dendrogram(Z, truncate_mode='lastp', p=100, show_leaf_counts=False, leaf_rotation=90.0, leaf_font_size=12.0, show_contracted=True) from sklearn.cluster import KMeans clf_k = KMeans(n_clusters=3, init='k-means++', max_iter=300, n_init=5, random_state=0) pred_y_k = clf_k.fit_predict(X) X = message_embeddings import numpy as np from sklearn.datasets.samples_generator import make_blobs from sklearn.neighbors import NearestNeighbors from sklearn.cluster import DBSCAN from matplotlib import pyplot as plt import seaborn as sns sns.set() neigh = NearestNeighbors(n_neighbors=2) nbrs = neigh.fit(X) distances, indices = nbrs.kneighbors(X) distances = np.sort(distances, axis=0) distances = distances[:, 1] from sklearn.mixture import GaussianMixture clf_em = GaussianMixture(n_components=5, init_params='random', covariance_type='full') pred_y_em = clf_em.fit_predict(X) myset = set(pred_y_em.tolist()) my_list = list(set(myset)) export = pd.DataFrame() export['Comentario'] = comments export['cluster'] = c sns.countplot(x='cluster', data=export)
code
33111267/cell_4
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd
pd.options.display.max_colwidth = 255
df = pd.read_csv('../input/airline-sentiment/Tweets.csv')
df.head(2)
code
33111267/cell_23
[ "text_plain_output_1.png" ]
from nltk import word_tokenize from scipy.cluster.hierarchy import dendrogram, linkage from scipy.cluster.hierarchy import fcluster from sklearn.cluster import KMeans from sklearn.manifold import TSNE from sklearn.neighbors import NearestNeighbors from unidecode import unidecode import bs4 import matplotlib.pyplot as plt import nltk import numpy as np import numpy as np import numpy as np import pandas as pd import pandas as pd import seaborn as sns import seaborn as sns import seaborn as sns import string import tensorflow_hub as hub import unicodedata pd.options.display.max_colwidth = 255 df = pd.read_csv('../input/airline-sentiment/Tweets.csv') module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4' model = hub.load(module_url) def embed(input): return model(input) import seaborn as sns df_neg = df.query("airline_sentiment == 'negative'").head(1000).copy() df_neu = df.query("airline_sentiment == 'neutral'").head(1000).copy() df_pos = df.query("airline_sentiment == 'positive'").head(1000).copy() df_balanced = pd.concat([df_neg, df_neu, df_pos], ignore_index=True) df_balanced = df_balanced.sample(frac=1).reset_index(drop=True) comments = df_balanced['text'].copy() def limpa_dataframe(data, column, hmtl='s', emoji='s', punct='s', lower='s', stopw='s', lng='english', token='s', stm='s', lmz='s'): import nltk from nltk import word_tokenize from nltk.stem.porter import PorterStemmer from nltk.stem import WordNetLemmatizer import bs4 import string wn = nltk.WordNetLemmatizer() ps = nltk.PorterStemmer() stopword = nltk.corpus.stopwords.words(lng) if hmtl == 's': data[column] = data[column].apply(lambda x: bs4.BeautifulSoup(x, 'lxml').get_text()) def deEmojify(inputString): import unicodedata from unidecode import unidecode returnString = '' for character in inputString: try: character.encode('ascii') returnString += character except UnicodeEncodeError: replaced = unidecode(str(character)) if replaced != '': returnString += replaced else: try: returnString += '[' + unicodedata.name(character) + ']' except ValueError: returnString += '[x]' return returnString if emoji == 's': data[column] = data[column].apply(lambda x: deEmojify(x)) def remove_punct(text): text_nopunct = ''.join([char for char in text if char not in string.punctuation]) return text_nopunct if punct == 's': data[column] = data[column].apply(lambda x: remove_punct(x)) if lower == 's': data[column] = [token.lower() for token in data[column]] if token == 's': data[column] = [word_tokenize(word) for word in data[column]] def remove_stopwords(tokenized_list): text = [word for word in tokenized_list if word not in stopword] return text if stopw == 's': data[column] = data[column].apply(lambda x: remove_stopwords(x)) def stemming(tokenized_text): text = [ps.stem(word) for word in tokenized_text] return text if stm == 's': data[column] = data[column].apply(lambda x: stemming(x)) def lemmatizing(tokenized_text): text = [wn.lemmatize(word) for word in tokenized_text] return text if lmz == 's': data[column] = data[column].apply(lambda x: lemmatizing(x)) data[column] = [' '.join(word) for word in data[column]] return data df_clean = df_balanced.copy() df_clean = limpa_dataframe(df_clean, column='text', hmtl='s', emoji='s', punct='s', stopw='s', lng='english', token='s', stm='s', lmz='s') messages = df_clean['text'] message_embeddings = embed(messages) from sklearn.manifold import TSNE tsne = TSNE(n_components=2, perplexity=5) tsne_result = tsne.fit_transform(message_embeddings) tsne_df = pd.DataFrame({'X': tsne_result[:, 0], 'Y': 
tsne_result[:, 1]}) from scipy.spatial.distance import pdist from scipy.cluster.hierarchy import dendrogram, linkage from scipy.cluster.hierarchy import cophenet from scipy.cluster.hierarchy import fcluster X = tsne_result Z = linkage(X, 'average', 'cosine') threshold = 1 - 0.9 c = fcluster(Z, threshold, criterion='distance') myset = set(c.tolist()) my_list = list(set(myset)) dendrogram(Z, truncate_mode='lastp', p=100, show_leaf_counts=False, leaf_rotation=90.0, leaf_font_size=12.0, show_contracted=True) from sklearn.cluster import KMeans clf_k = KMeans(n_clusters=3, init='k-means++', max_iter=300, n_init=5, random_state=0) pred_y_k = clf_k.fit_predict(X) X = message_embeddings import numpy as np from sklearn.datasets.samples_generator import make_blobs from sklearn.neighbors import NearestNeighbors from sklearn.cluster import DBSCAN from matplotlib import pyplot as plt import seaborn as sns sns.set() neigh = NearestNeighbors(n_neighbors=2) nbrs = neigh.fit(X) distances, indices = nbrs.kneighbors(X) distances = np.sort(distances, axis=0) distances = distances[:, 1] plt.plot(distances)
code
33111267/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
from nltk import word_tokenize from scipy.cluster.hierarchy import dendrogram, linkage from scipy.cluster.hierarchy import fcluster from sklearn.manifold import TSNE from unidecode import unidecode import bs4 import matplotlib.pyplot as plt import nltk import pandas as pd import pandas as pd import seaborn as sns import seaborn as sns import string import tensorflow_hub as hub import unicodedata pd.options.display.max_colwidth = 255 df = pd.read_csv('../input/airline-sentiment/Tweets.csv') module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4' model = hub.load(module_url) def embed(input): return model(input) import seaborn as sns df_neg = df.query("airline_sentiment == 'negative'").head(1000).copy() df_neu = df.query("airline_sentiment == 'neutral'").head(1000).copy() df_pos = df.query("airline_sentiment == 'positive'").head(1000).copy() df_balanced = pd.concat([df_neg, df_neu, df_pos], ignore_index=True) df_balanced = df_balanced.sample(frac=1).reset_index(drop=True) comments = df_balanced['text'].copy() def limpa_dataframe(data, column, hmtl='s', emoji='s', punct='s', lower='s', stopw='s', lng='english', token='s', stm='s', lmz='s'): import nltk from nltk import word_tokenize from nltk.stem.porter import PorterStemmer from nltk.stem import WordNetLemmatizer import bs4 import string wn = nltk.WordNetLemmatizer() ps = nltk.PorterStemmer() stopword = nltk.corpus.stopwords.words(lng) if hmtl == 's': data[column] = data[column].apply(lambda x: bs4.BeautifulSoup(x, 'lxml').get_text()) def deEmojify(inputString): import unicodedata from unidecode import unidecode returnString = '' for character in inputString: try: character.encode('ascii') returnString += character except UnicodeEncodeError: replaced = unidecode(str(character)) if replaced != '': returnString += replaced else: try: returnString += '[' + unicodedata.name(character) + ']' except ValueError: returnString += '[x]' return returnString if emoji == 's': data[column] = data[column].apply(lambda x: deEmojify(x)) def remove_punct(text): text_nopunct = ''.join([char for char in text if char not in string.punctuation]) return text_nopunct if punct == 's': data[column] = data[column].apply(lambda x: remove_punct(x)) if lower == 's': data[column] = [token.lower() for token in data[column]] if token == 's': data[column] = [word_tokenize(word) for word in data[column]] def remove_stopwords(tokenized_list): text = [word for word in tokenized_list if word not in stopword] return text if stopw == 's': data[column] = data[column].apply(lambda x: remove_stopwords(x)) def stemming(tokenized_text): text = [ps.stem(word) for word in tokenized_text] return text if stm == 's': data[column] = data[column].apply(lambda x: stemming(x)) def lemmatizing(tokenized_text): text = [wn.lemmatize(word) for word in tokenized_text] return text if lmz == 's': data[column] = data[column].apply(lambda x: lemmatizing(x)) data[column] = [' '.join(word) for word in data[column]] return data df_clean = df_balanced.copy() df_clean = limpa_dataframe(df_clean, column='text', hmtl='s', emoji='s', punct='s', stopw='s', lng='english', token='s', stm='s', lmz='s') messages = df_clean['text'] message_embeddings = embed(messages) from sklearn.manifold import TSNE tsne = TSNE(n_components=2, perplexity=5) tsne_result = tsne.fit_transform(message_embeddings) tsne_df = pd.DataFrame({'X': tsne_result[:, 0], 'Y': tsne_result[:, 1]}) from scipy.spatial.distance import pdist from scipy.cluster.hierarchy import dendrogram, linkage from scipy.cluster.hierarchy import 
cophenet from scipy.cluster.hierarchy import fcluster X = tsne_result Z = linkage(X, 'average', 'cosine') threshold = 1 - 0.9 c = fcluster(Z, threshold, criterion='distance') myset = set(c.tolist()) my_list = list(set(myset)) plt.figure(figsize=(10, 8)) plt.scatter(X[:, 0], X[:, 1], c=c, cmap='prism') plt.show()
code
33111267/cell_6
[ "text_plain_output_1.png" ]
import tensorflow_hub as hub
module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4'
model = hub.load(module_url)
print('module %s loaded' % module_url)

def embed(input):
    return model(input)
code
33111267/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
from nltk import word_tokenize from scipy.cluster.hierarchy import dendrogram, linkage from scipy.cluster.hierarchy import fcluster from sklearn.cluster import KMeans from sklearn.manifold import TSNE from sklearn.mixture import GaussianMixture from sklearn.neighbors import NearestNeighbors from unidecode import unidecode import bs4 import matplotlib.pyplot as plt import nltk import numpy as np import numpy as np import numpy as np import pandas as pd import pandas as pd import seaborn as sns import seaborn as sns import seaborn as sns import string import tensorflow_hub as hub import unicodedata pd.options.display.max_colwidth = 255 df = pd.read_csv('../input/airline-sentiment/Tweets.csv') comments = df['text'] module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4' model = hub.load(module_url) def embed(input): return model(input) import seaborn as sns df_neg = df.query("airline_sentiment == 'negative'").head(1000).copy() df_neu = df.query("airline_sentiment == 'neutral'").head(1000).copy() df_pos = df.query("airline_sentiment == 'positive'").head(1000).copy() df_balanced = pd.concat([df_neg, df_neu, df_pos], ignore_index=True) df_balanced = df_balanced.sample(frac=1).reset_index(drop=True) comments = df_balanced['text'].copy() def limpa_dataframe(data, column, hmtl='s', emoji='s', punct='s', lower='s', stopw='s', lng='english', token='s', stm='s', lmz='s'): import nltk from nltk import word_tokenize from nltk.stem.porter import PorterStemmer from nltk.stem import WordNetLemmatizer import bs4 import string wn = nltk.WordNetLemmatizer() ps = nltk.PorterStemmer() stopword = nltk.corpus.stopwords.words(lng) if hmtl == 's': data[column] = data[column].apply(lambda x: bs4.BeautifulSoup(x, 'lxml').get_text()) def deEmojify(inputString): import unicodedata from unidecode import unidecode returnString = '' for character in inputString: try: character.encode('ascii') returnString += character except UnicodeEncodeError: replaced = unidecode(str(character)) if replaced != '': returnString += replaced else: try: returnString += '[' + unicodedata.name(character) + ']' except ValueError: returnString += '[x]' return returnString if emoji == 's': data[column] = data[column].apply(lambda x: deEmojify(x)) def remove_punct(text): text_nopunct = ''.join([char for char in text if char not in string.punctuation]) return text_nopunct if punct == 's': data[column] = data[column].apply(lambda x: remove_punct(x)) if lower == 's': data[column] = [token.lower() for token in data[column]] if token == 's': data[column] = [word_tokenize(word) for word in data[column]] def remove_stopwords(tokenized_list): text = [word for word in tokenized_list if word not in stopword] return text if stopw == 's': data[column] = data[column].apply(lambda x: remove_stopwords(x)) def stemming(tokenized_text): text = [ps.stem(word) for word in tokenized_text] return text if stm == 's': data[column] = data[column].apply(lambda x: stemming(x)) def lemmatizing(tokenized_text): text = [wn.lemmatize(word) for word in tokenized_text] return text if lmz == 's': data[column] = data[column].apply(lambda x: lemmatizing(x)) data[column] = [' '.join(word) for word in data[column]] return data df_clean = df_balanced.copy() df_clean = limpa_dataframe(df_clean, column='text', hmtl='s', emoji='s', punct='s', stopw='s', lng='english', token='s', stm='s', lmz='s') messages = df_clean['text'] message_embeddings = embed(messages) from sklearn.manifold import TSNE tsne = TSNE(n_components=2, perplexity=5) tsne_result = 
tsne.fit_transform(message_embeddings) tsne_df = pd.DataFrame({'X': tsne_result[:, 0], 'Y': tsne_result[:, 1]}) from scipy.spatial.distance import pdist from scipy.cluster.hierarchy import dendrogram, linkage from scipy.cluster.hierarchy import cophenet from scipy.cluster.hierarchy import fcluster X = tsne_result Z = linkage(X, 'average', 'cosine') threshold = 1 - 0.9 c = fcluster(Z, threshold, criterion='distance') myset = set(c.tolist()) my_list = list(set(myset)) dendrogram(Z, truncate_mode='lastp', p=100, show_leaf_counts=False, leaf_rotation=90.0, leaf_font_size=12.0, show_contracted=True) from sklearn.cluster import KMeans clf_k = KMeans(n_clusters=3, init='k-means++', max_iter=300, n_init=5, random_state=0) pred_y_k = clf_k.fit_predict(X) X = message_embeddings import numpy as np from sklearn.datasets.samples_generator import make_blobs from sklearn.neighbors import NearestNeighbors from sklearn.cluster import DBSCAN from matplotlib import pyplot as plt import seaborn as sns sns.set() neigh = NearestNeighbors(n_neighbors=2) nbrs = neigh.fit(X) distances, indices = nbrs.kneighbors(X) distances = np.sort(distances, axis=0) distances = distances[:, 1] from sklearn.mixture import GaussianMixture clf_em = GaussianMixture(n_components=5, init_params='random', covariance_type='full') pred_y_em = clf_em.fit_predict(X) myset = set(pred_y_em.tolist()) my_list = list(set(myset)) export = pd.DataFrame() export['Comentario'] = comments export['cluster'] = c pd.options.display.max_colwidth = 255 print(export.query('cluster == 3')['Comentario'].head(10))
code
33111267/cell_26
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import seaborn as sns
pd.options.display.max_colwidth = 255
df = pd.read_csv('../input/airline-sentiment/Tweets.csv')
comments = df['text']
import seaborn as sns
df_neg = df.query("airline_sentiment == 'negative'").head(1000).copy()
df_neu = df.query("airline_sentiment == 'neutral'").head(1000).copy()
df_pos = df.query("airline_sentiment == 'positive'").head(1000).copy()
df_balanced = pd.concat([df_neg, df_neu, df_pos], ignore_index=True)
df_balanced = df_balanced.sample(frac=1).reset_index(drop=True)
comments = df_balanced['text'].copy()
len(comments)
code
33111267/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
pd.options.display.max_colwidth = 255
df = pd.read_csv('../input/airline-sentiment/Tweets.csv')
print('Head: ', df.columns)
print('\nShape: ', df.shape)
print('\nDescrição:')
print(df.describe())
code
33111267/cell_18
[ "text_plain_output_1.png" ]
from nltk import word_tokenize from scipy.cluster.hierarchy import dendrogram, linkage from scipy.cluster.hierarchy import fcluster from sklearn.manifold import TSNE from unidecode import unidecode import bs4 import nltk import pandas as pd import pandas as pd import seaborn as sns import seaborn as sns import string import tensorflow_hub as hub import unicodedata pd.options.display.max_colwidth = 255 df = pd.read_csv('../input/airline-sentiment/Tweets.csv') module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4' model = hub.load(module_url) def embed(input): return model(input) import seaborn as sns df_neg = df.query("airline_sentiment == 'negative'").head(1000).copy() df_neu = df.query("airline_sentiment == 'neutral'").head(1000).copy() df_pos = df.query("airline_sentiment == 'positive'").head(1000).copy() df_balanced = pd.concat([df_neg, df_neu, df_pos], ignore_index=True) df_balanced = df_balanced.sample(frac=1).reset_index(drop=True) comments = df_balanced['text'].copy() def limpa_dataframe(data, column, hmtl='s', emoji='s', punct='s', lower='s', stopw='s', lng='english', token='s', stm='s', lmz='s'): import nltk from nltk import word_tokenize from nltk.stem.porter import PorterStemmer from nltk.stem import WordNetLemmatizer import bs4 import string wn = nltk.WordNetLemmatizer() ps = nltk.PorterStemmer() stopword = nltk.corpus.stopwords.words(lng) if hmtl == 's': data[column] = data[column].apply(lambda x: bs4.BeautifulSoup(x, 'lxml').get_text()) def deEmojify(inputString): import unicodedata from unidecode import unidecode returnString = '' for character in inputString: try: character.encode('ascii') returnString += character except UnicodeEncodeError: replaced = unidecode(str(character)) if replaced != '': returnString += replaced else: try: returnString += '[' + unicodedata.name(character) + ']' except ValueError: returnString += '[x]' return returnString if emoji == 's': data[column] = data[column].apply(lambda x: deEmojify(x)) def remove_punct(text): text_nopunct = ''.join([char for char in text if char not in string.punctuation]) return text_nopunct if punct == 's': data[column] = data[column].apply(lambda x: remove_punct(x)) if lower == 's': data[column] = [token.lower() for token in data[column]] if token == 's': data[column] = [word_tokenize(word) for word in data[column]] def remove_stopwords(tokenized_list): text = [word for word in tokenized_list if word not in stopword] return text if stopw == 's': data[column] = data[column].apply(lambda x: remove_stopwords(x)) def stemming(tokenized_text): text = [ps.stem(word) for word in tokenized_text] return text if stm == 's': data[column] = data[column].apply(lambda x: stemming(x)) def lemmatizing(tokenized_text): text = [wn.lemmatize(word) for word in tokenized_text] return text if lmz == 's': data[column] = data[column].apply(lambda x: lemmatizing(x)) data[column] = [' '.join(word) for word in data[column]] return data df_clean = df_balanced.copy() df_clean = limpa_dataframe(df_clean, column='text', hmtl='s', emoji='s', punct='s', stopw='s', lng='english', token='s', stm='s', lmz='s') messages = df_clean['text'] message_embeddings = embed(messages) from sklearn.manifold import TSNE tsne = TSNE(n_components=2, perplexity=5) tsne_result = tsne.fit_transform(message_embeddings) tsne_df = pd.DataFrame({'X': tsne_result[:, 0], 'Y': tsne_result[:, 1]}) from scipy.spatial.distance import pdist from scipy.cluster.hierarchy import dendrogram, linkage from scipy.cluster.hierarchy import cophenet from scipy.cluster.hierarchy 
import fcluster X = tsne_result Z = linkage(X, 'average', 'cosine') threshold = 1 - 0.9 c = fcluster(Z, threshold, criterion='distance') myset = set(c.tolist()) my_list = list(set(myset)) print('Clusters Encontrados') print(len(my_list))
code
33111267/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
from nltk import word_tokenize from scipy.cluster.hierarchy import dendrogram, linkage from scipy.cluster.hierarchy import fcluster from sklearn.cluster import KMeans from sklearn.manifold import TSNE from sklearn.mixture import GaussianMixture from sklearn.neighbors import NearestNeighbors from unidecode import unidecode import bs4 import matplotlib.pyplot as plt import nltk import numpy as np import numpy as np import numpy as np import pandas as pd import pandas as pd import seaborn as sns import seaborn as sns import seaborn as sns import string import tensorflow_hub as hub import unicodedata pd.options.display.max_colwidth = 255 df = pd.read_csv('../input/airline-sentiment/Tweets.csv') comments = df['text'] module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4' model = hub.load(module_url) def embed(input): return model(input) import seaborn as sns df_neg = df.query("airline_sentiment == 'negative'").head(1000).copy() df_neu = df.query("airline_sentiment == 'neutral'").head(1000).copy() df_pos = df.query("airline_sentiment == 'positive'").head(1000).copy() df_balanced = pd.concat([df_neg, df_neu, df_pos], ignore_index=True) df_balanced = df_balanced.sample(frac=1).reset_index(drop=True) comments = df_balanced['text'].copy() def limpa_dataframe(data, column, hmtl='s', emoji='s', punct='s', lower='s', stopw='s', lng='english', token='s', stm='s', lmz='s'): import nltk from nltk import word_tokenize from nltk.stem.porter import PorterStemmer from nltk.stem import WordNetLemmatizer import bs4 import string wn = nltk.WordNetLemmatizer() ps = nltk.PorterStemmer() stopword = nltk.corpus.stopwords.words(lng) if hmtl == 's': data[column] = data[column].apply(lambda x: bs4.BeautifulSoup(x, 'lxml').get_text()) def deEmojify(inputString): import unicodedata from unidecode import unidecode returnString = '' for character in inputString: try: character.encode('ascii') returnString += character except UnicodeEncodeError: replaced = unidecode(str(character)) if replaced != '': returnString += replaced else: try: returnString += '[' + unicodedata.name(character) + ']' except ValueError: returnString += '[x]' return returnString if emoji == 's': data[column] = data[column].apply(lambda x: deEmojify(x)) def remove_punct(text): text_nopunct = ''.join([char for char in text if char not in string.punctuation]) return text_nopunct if punct == 's': data[column] = data[column].apply(lambda x: remove_punct(x)) if lower == 's': data[column] = [token.lower() for token in data[column]] if token == 's': data[column] = [word_tokenize(word) for word in data[column]] def remove_stopwords(tokenized_list): text = [word for word in tokenized_list if word not in stopword] return text if stopw == 's': data[column] = data[column].apply(lambda x: remove_stopwords(x)) def stemming(tokenized_text): text = [ps.stem(word) for word in tokenized_text] return text if stm == 's': data[column] = data[column].apply(lambda x: stemming(x)) def lemmatizing(tokenized_text): text = [wn.lemmatize(word) for word in tokenized_text] return text if lmz == 's': data[column] = data[column].apply(lambda x: lemmatizing(x)) data[column] = [' '.join(word) for word in data[column]] return data df_clean = df_balanced.copy() df_clean = limpa_dataframe(df_clean, column='text', hmtl='s', emoji='s', punct='s', stopw='s', lng='english', token='s', stm='s', lmz='s') messages = df_clean['text'] message_embeddings = embed(messages) from sklearn.manifold import TSNE tsne = TSNE(n_components=2, perplexity=5) tsne_result = 
tsne.fit_transform(message_embeddings) tsne_df = pd.DataFrame({'X': tsne_result[:, 0], 'Y': tsne_result[:, 1]}) from scipy.spatial.distance import pdist from scipy.cluster.hierarchy import dendrogram, linkage from scipy.cluster.hierarchy import cophenet from scipy.cluster.hierarchy import fcluster X = tsne_result Z = linkage(X, 'average', 'cosine') threshold = 1 - 0.9 c = fcluster(Z, threshold, criterion='distance') myset = set(c.tolist()) my_list = list(set(myset)) dendrogram(Z, truncate_mode='lastp', p=100, show_leaf_counts=False, leaf_rotation=90.0, leaf_font_size=12.0, show_contracted=True) from sklearn.cluster import KMeans clf_k = KMeans(n_clusters=3, init='k-means++', max_iter=300, n_init=5, random_state=0) pred_y_k = clf_k.fit_predict(X) X = message_embeddings import numpy as np from sklearn.datasets.samples_generator import make_blobs from sklearn.neighbors import NearestNeighbors from sklearn.cluster import DBSCAN from matplotlib import pyplot as plt import seaborn as sns sns.set() neigh = NearestNeighbors(n_neighbors=2) nbrs = neigh.fit(X) distances, indices = nbrs.kneighbors(X) distances = np.sort(distances, axis=0) distances = distances[:, 1] from sklearn.mixture import GaussianMixture clf_em = GaussianMixture(n_components=5, init_params='random', covariance_type='full') pred_y_em = clf_em.fit_predict(X) myset = set(pred_y_em.tolist()) my_list = list(set(myset)) export = pd.DataFrame() export['Comentario'] = comments export['cluster'] = c pd.options.display.max_colwidth = 255 print(export.query('cluster == 2')['Comentario'].head(10))
code
33111267/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
pd.options.display.max_colwidth = 255
df = pd.read_csv('../input/airline-sentiment/Tweets.csv')
import seaborn as sns
sns.countplot(x='airline_sentiment', data=df)
code
33111267/cell_16
[ "text_plain_output_1.png" ]
from nltk import word_tokenize from sklearn.manifold import TSNE from unidecode import unidecode import bs4 import nltk import pandas as pd import pandas as pd import seaborn as sns import seaborn as sns import string import tensorflow_hub as hub import unicodedata pd.options.display.max_colwidth = 255 df = pd.read_csv('../input/airline-sentiment/Tweets.csv') module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4' model = hub.load(module_url) def embed(input): return model(input) import seaborn as sns df_neg = df.query("airline_sentiment == 'negative'").head(1000).copy() df_neu = df.query("airline_sentiment == 'neutral'").head(1000).copy() df_pos = df.query("airline_sentiment == 'positive'").head(1000).copy() df_balanced = pd.concat([df_neg, df_neu, df_pos], ignore_index=True) df_balanced = df_balanced.sample(frac=1).reset_index(drop=True) comments = df_balanced['text'].copy() def limpa_dataframe(data, column, hmtl='s', emoji='s', punct='s', lower='s', stopw='s', lng='english', token='s', stm='s', lmz='s'): import nltk from nltk import word_tokenize from nltk.stem.porter import PorterStemmer from nltk.stem import WordNetLemmatizer import bs4 import string wn = nltk.WordNetLemmatizer() ps = nltk.PorterStemmer() stopword = nltk.corpus.stopwords.words(lng) if hmtl == 's': data[column] = data[column].apply(lambda x: bs4.BeautifulSoup(x, 'lxml').get_text()) def deEmojify(inputString): import unicodedata from unidecode import unidecode returnString = '' for character in inputString: try: character.encode('ascii') returnString += character except UnicodeEncodeError: replaced = unidecode(str(character)) if replaced != '': returnString += replaced else: try: returnString += '[' + unicodedata.name(character) + ']' except ValueError: returnString += '[x]' return returnString if emoji == 's': data[column] = data[column].apply(lambda x: deEmojify(x)) def remove_punct(text): text_nopunct = ''.join([char for char in text if char not in string.punctuation]) return text_nopunct if punct == 's': data[column] = data[column].apply(lambda x: remove_punct(x)) if lower == 's': data[column] = [token.lower() for token in data[column]] if token == 's': data[column] = [word_tokenize(word) for word in data[column]] def remove_stopwords(tokenized_list): text = [word for word in tokenized_list if word not in stopword] return text if stopw == 's': data[column] = data[column].apply(lambda x: remove_stopwords(x)) def stemming(tokenized_text): text = [ps.stem(word) for word in tokenized_text] return text if stm == 's': data[column] = data[column].apply(lambda x: stemming(x)) def lemmatizing(tokenized_text): text = [wn.lemmatize(word) for word in tokenized_text] return text if lmz == 's': data[column] = data[column].apply(lambda x: lemmatizing(x)) data[column] = [' '.join(word) for word in data[column]] return data df_clean = df_balanced.copy() df_clean = limpa_dataframe(df_clean, column='text', hmtl='s', emoji='s', punct='s', stopw='s', lng='english', token='s', stm='s', lmz='s') messages = df_clean['text'] message_embeddings = embed(messages) from sklearn.manifold import TSNE tsne = TSNE(n_components=2, perplexity=5) tsne_result = tsne.fit_transform(message_embeddings) tsne_df = pd.DataFrame({'X': tsne_result[:, 0], 'Y': tsne_result[:, 1]}) sns.scatterplot(x='X', y='Y', data=tsne_df)
code
33111267/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
from nltk import word_tokenize
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.cluster.hierarchy import fcluster
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
from sklearn.mixture import GaussianMixture
from sklearn.neighbors import NearestNeighbors
from unidecode import unidecode
import bs4
import matplotlib.pyplot as plt
import nltk
import numpy as np
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import seaborn as sns
import string
import tensorflow_hub as hub
import unicodedata
pd.options.display.max_colwidth = 255
df = pd.read_csv('../input/airline-sentiment/Tweets.csv')
module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4'
model = hub.load(module_url)
def embed(input):
    return model(input)
import seaborn as sns
df_neg = df.query("airline_sentiment == 'negative'").head(1000).copy()
df_neu = df.query("airline_sentiment == 'neutral'").head(1000).copy()
df_pos = df.query("airline_sentiment == 'positive'").head(1000).copy()
df_balanced = pd.concat([df_neg, df_neu, df_pos], ignore_index=True)
df_balanced = df_balanced.sample(frac=1).reset_index(drop=True)
comments = df_balanced['text'].copy()
def limpa_dataframe(data, column, hmtl='s', emoji='s', punct='s', lower='s', stopw='s', lng='english', token='s', stm='s', lmz='s'):
    import nltk
    from nltk import word_tokenize
    from nltk.stem.porter import PorterStemmer
    from nltk.stem import WordNetLemmatizer
    import bs4
    import string
    wn = nltk.WordNetLemmatizer()
    ps = nltk.PorterStemmer()
    stopword = nltk.corpus.stopwords.words(lng)
    if hmtl == 's':
        data[column] = data[column].apply(lambda x: bs4.BeautifulSoup(x, 'lxml').get_text())
    def deEmojify(inputString):
        import unicodedata
        from unidecode import unidecode
        returnString = ''
        for character in inputString:
            try:
                character.encode('ascii')
                returnString += character
            except UnicodeEncodeError:
                replaced = unidecode(str(character))
                if replaced != '':
                    returnString += replaced
                else:
                    try:
                        returnString += '[' + unicodedata.name(character) + ']'
                    except ValueError:
                        returnString += '[x]'
        return returnString
    if emoji == 's':
        data[column] = data[column].apply(lambda x: deEmojify(x))
    def remove_punct(text):
        text_nopunct = ''.join([char for char in text if char not in string.punctuation])
        return text_nopunct
    if punct == 's':
        data[column] = data[column].apply(lambda x: remove_punct(x))
    if lower == 's':
        data[column] = [token.lower() for token in data[column]]
    if token == 's':
        data[column] = [word_tokenize(word) for word in data[column]]
    def remove_stopwords(tokenized_list):
        text = [word for word in tokenized_list if word not in stopword]
        return text
    if stopw == 's':
        data[column] = data[column].apply(lambda x: remove_stopwords(x))
    def stemming(tokenized_text):
        text = [ps.stem(word) for word in tokenized_text]
        return text
    if stm == 's':
        data[column] = data[column].apply(lambda x: stemming(x))
    def lemmatizing(tokenized_text):
        text = [wn.lemmatize(word) for word in tokenized_text]
        return text
    if lmz == 's':
        data[column] = data[column].apply(lambda x: lemmatizing(x))
    data[column] = [' '.join(word) for word in data[column]]
    return data
df_clean = df_balanced.copy()
df_clean = limpa_dataframe(df_clean, column='text', hmtl='s', emoji='s', punct='s', stopw='s', lng='english', token='s', stm='s', lmz='s')
messages = df_clean['text']
message_embeddings = embed(messages)
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, perplexity=5)
tsne_result = tsne.fit_transform(message_embeddings)
tsne_df = pd.DataFrame({'X': tsne_result[:, 0], 'Y': tsne_result[:, 1]})
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.cluster.hierarchy import cophenet
from scipy.cluster.hierarchy import fcluster
X = tsne_result
Z = linkage(X, 'average', 'cosine')
threshold = 1 - 0.9
c = fcluster(Z, threshold, criterion='distance')
myset = set(c.tolist())
my_list = list(set(myset))
dendrogram(Z, truncate_mode='lastp', p=100, show_leaf_counts=False, leaf_rotation=90.0, leaf_font_size=12.0, show_contracted=True)
from sklearn.cluster import KMeans
clf_k = KMeans(n_clusters=3, init='k-means++', max_iter=300, n_init=5, random_state=0)
pred_y_k = clf_k.fit_predict(X)
X = message_embeddings
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import DBSCAN
from matplotlib import pyplot as plt
import seaborn as sns
sns.set()
neigh = NearestNeighbors(n_neighbors=2)
nbrs = neigh.fit(X)
distances, indices = nbrs.kneighbors(X)
distances = np.sort(distances, axis=0)
distances = distances[:, 1]
from sklearn.mixture import GaussianMixture
clf_em = GaussianMixture(n_components=5, init_params='random', covariance_type='full')
pred_y_em = clf_em.fit_predict(X)
myset = set(pred_y_em.tolist())
my_list = list(set(myset))
print(len(my_list))
sns.scatterplot(x='X', y='Y', hue=pred_y_em, data=tsne_df)
code
33111267/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
from nltk import word_tokenize
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.cluster.hierarchy import fcluster
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
from unidecode import unidecode
import bs4
import nltk
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import string
import tensorflow_hub as hub
import unicodedata
pd.options.display.max_colwidth = 255
df = pd.read_csv('../input/airline-sentiment/Tweets.csv')
module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4'
model = hub.load(module_url)
def embed(input):
    return model(input)
import seaborn as sns
df_neg = df.query("airline_sentiment == 'negative'").head(1000).copy()
df_neu = df.query("airline_sentiment == 'neutral'").head(1000).copy()
df_pos = df.query("airline_sentiment == 'positive'").head(1000).copy()
df_balanced = pd.concat([df_neg, df_neu, df_pos], ignore_index=True)
df_balanced = df_balanced.sample(frac=1).reset_index(drop=True)
comments = df_balanced['text'].copy()
def limpa_dataframe(data, column, hmtl='s', emoji='s', punct='s', lower='s', stopw='s', lng='english', token='s', stm='s', lmz='s'):
    import nltk
    from nltk import word_tokenize
    from nltk.stem.porter import PorterStemmer
    from nltk.stem import WordNetLemmatizer
    import bs4
    import string
    wn = nltk.WordNetLemmatizer()
    ps = nltk.PorterStemmer()
    stopword = nltk.corpus.stopwords.words(lng)
    if hmtl == 's':
        data[column] = data[column].apply(lambda x: bs4.BeautifulSoup(x, 'lxml').get_text())
    def deEmojify(inputString):
        import unicodedata
        from unidecode import unidecode
        returnString = ''
        for character in inputString:
            try:
                character.encode('ascii')
                returnString += character
            except UnicodeEncodeError:
                replaced = unidecode(str(character))
                if replaced != '':
                    returnString += replaced
                else:
                    try:
                        returnString += '[' + unicodedata.name(character) + ']'
                    except ValueError:
                        returnString += '[x]'
        return returnString
    if emoji == 's':
        data[column] = data[column].apply(lambda x: deEmojify(x))
    def remove_punct(text):
        text_nopunct = ''.join([char for char in text if char not in string.punctuation])
        return text_nopunct
    if punct == 's':
        data[column] = data[column].apply(lambda x: remove_punct(x))
    if lower == 's':
        data[column] = [token.lower() for token in data[column]]
    if token == 's':
        data[column] = [word_tokenize(word) for word in data[column]]
    def remove_stopwords(tokenized_list):
        text = [word for word in tokenized_list if word not in stopword]
        return text
    if stopw == 's':
        data[column] = data[column].apply(lambda x: remove_stopwords(x))
    def stemming(tokenized_text):
        text = [ps.stem(word) for word in tokenized_text]
        return text
    if stm == 's':
        data[column] = data[column].apply(lambda x: stemming(x))
    def lemmatizing(tokenized_text):
        text = [wn.lemmatize(word) for word in tokenized_text]
        return text
    if lmz == 's':
        data[column] = data[column].apply(lambda x: lemmatizing(x))
    data[column] = [' '.join(word) for word in data[column]]
    return data
df_clean = df_balanced.copy()
df_clean = limpa_dataframe(df_clean, column='text', hmtl='s', emoji='s', punct='s', stopw='s', lng='english', token='s', stm='s', lmz='s')
messages = df_clean['text']
message_embeddings = embed(messages)
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, perplexity=5)
tsne_result = tsne.fit_transform(message_embeddings)
tsne_df = pd.DataFrame({'X': tsne_result[:, 0], 'Y': tsne_result[:, 1]})
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.cluster.hierarchy import cophenet
from scipy.cluster.hierarchy import fcluster
X = tsne_result
Z = linkage(X, 'average', 'cosine')
threshold = 1 - 0.9
c = fcluster(Z, threshold, criterion='distance')
myset = set(c.tolist())
my_list = list(set(myset))
from sklearn.cluster import KMeans
clf_k = KMeans(n_clusters=3, init='k-means++', max_iter=300, n_init=5, random_state=0)
pred_y_k = clf_k.fit_predict(X)
sns.scatterplot(x='X', y='Y', hue=pred_y_k, palette=['red', 'orange', 'blue'], data=tsne_df)
code
33111267/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
pd.options.display.max_colwidth = 255
df = pd.read_csv('../input/airline-sentiment/Tweets.csv')
import seaborn as sns
df_neg = df.query("airline_sentiment == 'negative'").head(1000).copy()
df_neu = df.query("airline_sentiment == 'neutral'").head(1000).copy()
df_pos = df.query("airline_sentiment == 'positive'").head(1000).copy()
df_balanced = pd.concat([df_neg, df_neu, df_pos], ignore_index=True)
df_balanced = df_balanced.sample(frac=1).reset_index(drop=True)
comments = df_balanced['text'].copy()
sns.countplot(x='airline_sentiment', data=df_balanced)
code
33111267/cell_27
[ "image_output_1.png" ]
from nltk import word_tokenize
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.cluster.hierarchy import fcluster
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
from sklearn.mixture import GaussianMixture
from sklearn.neighbors import NearestNeighbors
from unidecode import unidecode
import bs4
import matplotlib.pyplot as plt
import nltk
import numpy as np
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import seaborn as sns
import string
import tensorflow_hub as hub
import unicodedata
pd.options.display.max_colwidth = 255
df = pd.read_csv('../input/airline-sentiment/Tweets.csv')
comments = df['text']
module_url = 'https://tfhub.dev/google/universal-sentence-encoder/4'
model = hub.load(module_url)
def embed(input):
    return model(input)
import seaborn as sns
df_neg = df.query("airline_sentiment == 'negative'").head(1000).copy()
df_neu = df.query("airline_sentiment == 'neutral'").head(1000).copy()
df_pos = df.query("airline_sentiment == 'positive'").head(1000).copy()
df_balanced = pd.concat([df_neg, df_neu, df_pos], ignore_index=True)
df_balanced = df_balanced.sample(frac=1).reset_index(drop=True)
comments = df_balanced['text'].copy()
def limpa_dataframe(data, column, hmtl='s', emoji='s', punct='s', lower='s', stopw='s', lng='english', token='s', stm='s', lmz='s'):
    import nltk
    from nltk import word_tokenize
    from nltk.stem.porter import PorterStemmer
    from nltk.stem import WordNetLemmatizer
    import bs4
    import string
    wn = nltk.WordNetLemmatizer()
    ps = nltk.PorterStemmer()
    stopword = nltk.corpus.stopwords.words(lng)
    if hmtl == 's':
        data[column] = data[column].apply(lambda x: bs4.BeautifulSoup(x, 'lxml').get_text())
    def deEmojify(inputString):
        import unicodedata
        from unidecode import unidecode
        returnString = ''
        for character in inputString:
            try:
                character.encode('ascii')
                returnString += character
            except UnicodeEncodeError:
                replaced = unidecode(str(character))
                if replaced != '':
                    returnString += replaced
                else:
                    try:
                        returnString += '[' + unicodedata.name(character) + ']'
                    except ValueError:
                        returnString += '[x]'
        return returnString
    if emoji == 's':
        data[column] = data[column].apply(lambda x: deEmojify(x))
    def remove_punct(text):
        text_nopunct = ''.join([char for char in text if char not in string.punctuation])
        return text_nopunct
    if punct == 's':
        data[column] = data[column].apply(lambda x: remove_punct(x))
    if lower == 's':
        data[column] = [token.lower() for token in data[column]]
    if token == 's':
        data[column] = [word_tokenize(word) for word in data[column]]
    def remove_stopwords(tokenized_list):
        text = [word for word in tokenized_list if word not in stopword]
        return text
    if stopw == 's':
        data[column] = data[column].apply(lambda x: remove_stopwords(x))
    def stemming(tokenized_text):
        text = [ps.stem(word) for word in tokenized_text]
        return text
    if stm == 's':
        data[column] = data[column].apply(lambda x: stemming(x))
    def lemmatizing(tokenized_text):
        text = [wn.lemmatize(word) for word in tokenized_text]
        return text
    if lmz == 's':
        data[column] = data[column].apply(lambda x: lemmatizing(x))
    data[column] = [' '.join(word) for word in data[column]]
    return data
df_clean = df_balanced.copy()
df_clean = limpa_dataframe(df_clean, column='text', hmtl='s', emoji='s', punct='s', stopw='s', lng='english', token='s', stm='s', lmz='s')
messages = df_clean['text']
message_embeddings = embed(messages)
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, perplexity=5)
tsne_result = tsne.fit_transform(message_embeddings)
tsne_df = pd.DataFrame({'X': tsne_result[:, 0], 'Y': tsne_result[:, 1]})
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.cluster.hierarchy import cophenet
from scipy.cluster.hierarchy import fcluster
X = tsne_result
Z = linkage(X, 'average', 'cosine')
threshold = 1 - 0.9
c = fcluster(Z, threshold, criterion='distance')
myset = set(c.tolist())
my_list = list(set(myset))
dendrogram(Z, truncate_mode='lastp', p=100, show_leaf_counts=False, leaf_rotation=90.0, leaf_font_size=12.0, show_contracted=True)
from sklearn.cluster import KMeans
clf_k = KMeans(n_clusters=3, init='k-means++', max_iter=300, n_init=5, random_state=0)
pred_y_k = clf_k.fit_predict(X)
X = message_embeddings
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import DBSCAN
from matplotlib import pyplot as plt
import seaborn as sns
sns.set()
neigh = NearestNeighbors(n_neighbors=2)
nbrs = neigh.fit(X)
distances, indices = nbrs.kneighbors(X)
distances = np.sort(distances, axis=0)
distances = distances[:, 1]
from sklearn.mixture import GaussianMixture
clf_em = GaussianMixture(n_components=5, init_params='random', covariance_type='full')
pred_y_em = clf_em.fit_predict(X)
myset = set(pred_y_em.tolist())
my_list = list(set(myset))
export = pd.DataFrame()
export['Comentario'] = comments
export['cluster'] = c
pd.options.display.max_colwidth = 255
print(export.query('cluster == 1')['Comentario'].head(10))
code
33111267/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd
pd.options.display.max_colwidth = 255
df = pd.read_csv('../input/airline-sentiment/Tweets.csv')
comments = df['text']
comments.head(30)
code
129001574/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import dendrogram, linkage
import warnings
warnings.filterwarnings('ignore')
from scipy.cluster.hierarchy import dendrogram
from sklearn.cluster import AgglomerativeClustering
from matplotlib import pyplot as plt
pro = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
train = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
test_proteins = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_proteins.csv')
pro1 = pro.pivot(index=['visit_id', 'visit_month', 'patient_id'], columns='UniProt', values='NPX').reset_index().rename_axis(None, axis=1)
pro3 = pro1.dropna(thresh=1000, axis=1)
FEATURES_ALL = pro3.iloc[:, 3:].columns.tolist()
pro4 = pro3.dropna()
pro5 = pro4.copy()
for i in FEATURES_ALL:
    pro5.loc[:, i] = (pro4.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[:, i].std(axis=0)
pro6 = train.merge(pro5, how='left', on='visit_id').dropna(subset=['Q92823']).rename(columns={'patient_id_x': 'patient_id', 'visit_month_x': 'visit_month'})
pro6
pro7 = pro6[pro6['visit_month'] <= 36]
FEATURES_PPI = ['O00584', 'O14773', 'O60888', 'O94919', 'P00441', 'P00734', 'P00736', 'P00738', 'P00746', 'P00747', 'P00748', 'P00751', 'P01008', 'P01009', 'P01011', 'P01023', 'P01024', 'P01033', 'P01344', 'P01621', 'P01717', 'P01834', 'P01857', 'P01859', 'P01860', 'P01876', 'P01877', 'P02452', 'P02652', 'P02656', 'P02679', 'P02747', 'P02749', 'P02750', 'P02751', 'P02753', 'P02760', 'P02763', 'P02765', 'P02766', 'P02768', 'P02787', 'P02790', 'P04004', 'P04156', 'P04180', 'P04196', 'P04207', 'P04211', 'P04216', 'P04217', 'P04275', 'P04433', 'P05067', 'P05090', 'P05155', 'P05156', 'P05546', 'P06681', 'P06727', 'P07195', 'P07225', 'P07339', 'P07711', 'P07858', 'P07998', 'P08294', 'P08493', 'P08697', 'P09486', 'P10451', 'P10643', 'P13521', 'P13611', 'P16070', 'P18065', 'P19652', 'P19823', 'P24592', 'P25311', 'P35542', 'P36222', 'P39060', 'P40925', 'P41222', 'P43251', 'P43652', 'P49908', 'P51884', 'P54289', 'P61626', 'P80748', 'P98160', 'Q13283', 'Q13451', 'Q14118', 'Q14508', 'Q14515', 'Q14624', 'Q16270', 'Q16610', 'Q6UXB8', 'Q7Z5P9', 'Q8IWV7', 'Q8N2S1', 'Q8NBJ4', 'Q92520', 'Q92876', 'Q96PD5', 'Q9BY67', 'Q9NQ79', 'Q9Y646', 'Q9Y6R7']
SUPPLYMENT_FEATURE = ['P00450', 'P10451', 'P01033', 'P01008', 'P02647', 'P01024', 'Q92876']
FEATURES_PPI = list(set(SUPPLYMENT_FEATURE) | set(FEATURES_PPI))
data = pro7[FEATURES_PPI]
corr = data.corr()
corr
adjoint = corr
adjoint
pro1 = pro.pivot(index=['visit_id', 'visit_month', 'patient_id'], columns='UniProt', values='NPX').reset_index().rename_axis(None, axis=1)
pro3 = pro1.dropna(thresh=1000, axis=1)
FEATURES_ALL = pro3.iloc[:, 3:].columns.tolist()
FEATURES_DLE = list(set(FEATURES_ALL) - set(FEATURES_PPI))
for i in FEATURES_ALL:
    pro3.loc[:, i] = pro3.loc[:, i].fillna(pro3.loc[:, i].median())
pro4 = pro3.dropna()
pro5 = pro4.copy()
for i in FEATURES_ALL:
    pro5.loc[:, i] = (pro4.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[:, i].std(axis=0)
pro6 = train.merge(pro5, how='left', on='visit_id').dropna(subset=['Q92823']).rename(columns={'patient_id_x': 'patient_id', 'visit_month_x': 'visit_month'})
pro6.drop(FEATURES_DLE, axis=1, inplace=True)
finaldata = pro6
finaldata = finaldata.dropna()
finaldata = finaldata.reset_index()
finaldata.visit_month = finaldata.visit_month.astype('float')
finaldata
code
129001574/cell_6
[ "text_plain_output_1.png" ]
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import dendrogram, linkage
import warnings
warnings.filterwarnings('ignore')
from scipy.cluster.hierarchy import dendrogram
from sklearn.cluster import AgglomerativeClustering
from matplotlib import pyplot as plt
pro = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
train = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
test_proteins = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_proteins.csv')
pro1 = pro.pivot(index=['visit_id', 'visit_month', 'patient_id'], columns='UniProt', values='NPX').reset_index().rename_axis(None, axis=1)
pro3 = pro1.dropna(thresh=1000, axis=1)
FEATURES_ALL = pro3.iloc[:, 3:].columns.tolist()
pro4 = pro3.dropna()
pro5 = pro4.copy()
for i in FEATURES_ALL:
    pro5.loc[:, i] = (pro4.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[:, i].std(axis=0)
pro6 = train.merge(pro5, how='left', on='visit_id').dropna(subset=['Q92823']).rename(columns={'patient_id_x': 'patient_id', 'visit_month_x': 'visit_month'})
pro6
pro7 = pro6[pro6['visit_month'] <= 36]
FEATURES_PPI = ['O00584', 'O14773', 'O60888', 'O94919', 'P00441', 'P00734', 'P00736', 'P00738', 'P00746', 'P00747', 'P00748', 'P00751', 'P01008', 'P01009', 'P01011', 'P01023', 'P01024', 'P01033', 'P01344', 'P01621', 'P01717', 'P01834', 'P01857', 'P01859', 'P01860', 'P01876', 'P01877', 'P02452', 'P02652', 'P02656', 'P02679', 'P02747', 'P02749', 'P02750', 'P02751', 'P02753', 'P02760', 'P02763', 'P02765', 'P02766', 'P02768', 'P02787', 'P02790', 'P04004', 'P04156', 'P04180', 'P04196', 'P04207', 'P04211', 'P04216', 'P04217', 'P04275', 'P04433', 'P05067', 'P05090', 'P05155', 'P05156', 'P05546', 'P06681', 'P06727', 'P07195', 'P07225', 'P07339', 'P07711', 'P07858', 'P07998', 'P08294', 'P08493', 'P08697', 'P09486', 'P10451', 'P10643', 'P13521', 'P13611', 'P16070', 'P18065', 'P19652', 'P19823', 'P24592', 'P25311', 'P35542', 'P36222', 'P39060', 'P40925', 'P41222', 'P43251', 'P43652', 'P49908', 'P51884', 'P54289', 'P61626', 'P80748', 'P98160', 'Q13283', 'Q13451', 'Q14118', 'Q14508', 'Q14515', 'Q14624', 'Q16270', 'Q16610', 'Q6UXB8', 'Q7Z5P9', 'Q8IWV7', 'Q8N2S1', 'Q8NBJ4', 'Q92520', 'Q92876', 'Q96PD5', 'Q9BY67', 'Q9NQ79', 'Q9Y646', 'Q9Y6R7']
SUPPLYMENT_FEATURE = ['P00450', 'P10451', 'P01033', 'P01008', 'P02647', 'P01024', 'Q92876']
FEATURES_PPI = list(set(SUPPLYMENT_FEATURE) | set(FEATURES_PPI))
data = pro7[FEATURES_PPI]
corr = data.corr()
corr
adjoint = corr
adjoint
pro1 = pro.pivot(index=['visit_id', 'visit_month', 'patient_id'], columns='UniProt', values='NPX').reset_index().rename_axis(None, axis=1)
pro3 = pro1.dropna(thresh=1000, axis=1)
FEATURES_ALL = pro3.iloc[:, 3:].columns.tolist()
FEATURES_DLE = list(set(FEATURES_ALL) - set(FEATURES_PPI))
for i in FEATURES_ALL:
    pro3.loc[:, i] = pro3.loc[:, i].fillna(pro3.loc[:, i].median())
pro4 = pro3.dropna()
pro5 = pro4.copy()
for i in FEATURES_ALL:
    pro5.loc[:, i] = (pro4.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[:, i].std(axis=0)
pro6 = train.merge(pro5, how='left', on='visit_id').dropna(subset=['Q92823']).rename(columns={'patient_id_x': 'patient_id', 'visit_month_x': 'visit_month'})
pro6.drop(FEATURES_DLE, axis=1, inplace=True)
finaldata = pro6
finaldata = finaldata.dropna()
finaldata = finaldata.reset_index()
finaldata.visit_month = finaldata.visit_month.astype('float')
finaldata
target = ['updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']
x = finaldata[FEATURES_PPI]
y = finaldata[target]
adjoint1 = np.array(adjoint)[np.newaxis, :, :]
adjoint2 = np.repeat(adjoint1, len(finaldata), 0)
print(adjoint2.shape)
x1 = x.values[:, :, np.newaxis]
print(x1.shape)
code
129001574/cell_2
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import dendrogram, linkage
import warnings
warnings.filterwarnings('ignore')
from scipy.cluster.hierarchy import dendrogram
from sklearn.cluster import AgglomerativeClustering
from matplotlib import pyplot as plt
pro = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
train = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
test_proteins = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_proteins.csv')
pro1 = pro.pivot(index=['visit_id', 'visit_month', 'patient_id'], columns='UniProt', values='NPX').reset_index().rename_axis(None, axis=1)
pro3 = pro1.dropna(thresh=1000, axis=1)
FEATURES_ALL = pro3.iloc[:, 3:].columns.tolist()
pro4 = pro3.dropna()
pro5 = pro4.copy()
for i in FEATURES_ALL:
    pro5.loc[:, i] = (pro4.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[:, i].std(axis=0)
pro6 = train.merge(pro5, how='left', on='visit_id').dropna(subset=['Q92823']).rename(columns={'patient_id_x': 'patient_id', 'visit_month_x': 'visit_month'})
pro6
pro7 = pro6[pro6['visit_month'] <= 36]
FEATURES_PPI = ['O00584', 'O14773', 'O60888', 'O94919', 'P00441', 'P00734', 'P00736', 'P00738', 'P00746', 'P00747', 'P00748', 'P00751', 'P01008', 'P01009', 'P01011', 'P01023', 'P01024', 'P01033', 'P01344', 'P01621', 'P01717', 'P01834', 'P01857', 'P01859', 'P01860', 'P01876', 'P01877', 'P02452', 'P02652', 'P02656', 'P02679', 'P02747', 'P02749', 'P02750', 'P02751', 'P02753', 'P02760', 'P02763', 'P02765', 'P02766', 'P02768', 'P02787', 'P02790', 'P04004', 'P04156', 'P04180', 'P04196', 'P04207', 'P04211', 'P04216', 'P04217', 'P04275', 'P04433', 'P05067', 'P05090', 'P05155', 'P05156', 'P05546', 'P06681', 'P06727', 'P07195', 'P07225', 'P07339', 'P07711', 'P07858', 'P07998', 'P08294', 'P08493', 'P08697', 'P09486', 'P10451', 'P10643', 'P13521', 'P13611', 'P16070', 'P18065', 'P19652', 'P19823', 'P24592', 'P25311', 'P35542', 'P36222', 'P39060', 'P40925', 'P41222', 'P43251', 'P43652', 'P49908', 'P51884', 'P54289', 'P61626', 'P80748', 'P98160', 'Q13283', 'Q13451', 'Q14118', 'Q14508', 'Q14515', 'Q14624', 'Q16270', 'Q16610', 'Q6UXB8', 'Q7Z5P9', 'Q8IWV7', 'Q8N2S1', 'Q8NBJ4', 'Q92520', 'Q92876', 'Q96PD5', 'Q9BY67', 'Q9NQ79', 'Q9Y646', 'Q9Y6R7']
SUPPLYMENT_FEATURE = ['P00450', 'P10451', 'P01033', 'P01008', 'P02647', 'P01024', 'Q92876']
FEATURES_PPI = list(set(SUPPLYMENT_FEATURE) | set(FEATURES_PPI))
print(len(FEATURES_PPI))
data = pro7[FEATURES_PPI]
corr = data.corr()
corr
adjoint = corr
adjoint
code
129001574/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129001574/cell_7
[ "text_plain_output_1.png" ]
pip install spektral
code
129001574/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from spektral.layers import GCNConv
from spektral.layers import GCNConv
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Input, Dropout, Dense,Reshape,GlobalMaxPool1D,MaxPool1D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import tensorflow.keras.backend as K
import warnings
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import dendrogram, linkage
import warnings
warnings.filterwarnings('ignore')
from scipy.cluster.hierarchy import dendrogram
from sklearn.cluster import AgglomerativeClustering
from matplotlib import pyplot as plt
pro = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
train = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
test_proteins = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_proteins.csv')
pro1 = pro.pivot(index=['visit_id', 'visit_month', 'patient_id'], columns='UniProt', values='NPX').reset_index().rename_axis(None, axis=1)
pro3 = pro1.dropna(thresh=1000, axis=1)
FEATURES_ALL = pro3.iloc[:, 3:].columns.tolist()
pro4 = pro3.dropna()
pro5 = pro4.copy()
for i in FEATURES_ALL:
    pro5.loc[:, i] = (pro4.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[:, i].std(axis=0)
pro6 = train.merge(pro5, how='left', on='visit_id').dropna(subset=['Q92823']).rename(columns={'patient_id_x': 'patient_id', 'visit_month_x': 'visit_month'})
pro6
pro7 = pro6[pro6['visit_month'] <= 36]
FEATURES_PPI = ['O00584', 'O14773', 'O60888', 'O94919', 'P00441', 'P00734', 'P00736', 'P00738', 'P00746', 'P00747', 'P00748', 'P00751', 'P01008', 'P01009', 'P01011', 'P01023', 'P01024', 'P01033', 'P01344', 'P01621', 'P01717', 'P01834', 'P01857', 'P01859', 'P01860', 'P01876', 'P01877', 'P02452', 'P02652', 'P02656', 'P02679', 'P02747', 'P02749', 'P02750', 'P02751', 'P02753', 'P02760', 'P02763', 'P02765', 'P02766', 'P02768', 'P02787', 'P02790', 'P04004', 'P04156', 'P04180', 'P04196', 'P04207', 'P04211', 'P04216', 'P04217', 'P04275', 'P04433', 'P05067', 'P05090', 'P05155', 'P05156', 'P05546', 'P06681', 'P06727', 'P07195', 'P07225', 'P07339', 'P07711', 'P07858', 'P07998', 'P08294', 'P08493', 'P08697', 'P09486', 'P10451', 'P10643', 'P13521', 'P13611', 'P16070', 'P18065', 'P19652', 'P19823', 'P24592', 'P25311', 'P35542', 'P36222', 'P39060', 'P40925', 'P41222', 'P43251', 'P43652', 'P49908', 'P51884', 'P54289', 'P61626', 'P80748', 'P98160', 'Q13283', 'Q13451', 'Q14118', 'Q14508', 'Q14515', 'Q14624', 'Q16270', 'Q16610', 'Q6UXB8', 'Q7Z5P9', 'Q8IWV7', 'Q8N2S1', 'Q8NBJ4', 'Q92520', 'Q92876', 'Q96PD5', 'Q9BY67', 'Q9NQ79', 'Q9Y646', 'Q9Y6R7']
SUPPLYMENT_FEATURE = ['P00450', 'P10451', 'P01033', 'P01008', 'P02647', 'P01024', 'Q92876']
FEATURES_PPI = list(set(SUPPLYMENT_FEATURE) | set(FEATURES_PPI))
data = pro7[FEATURES_PPI]
corr = data.corr()
corr
adjoint = corr
adjoint
pro1 = pro.pivot(index=['visit_id', 'visit_month', 'patient_id'], columns='UniProt', values='NPX').reset_index().rename_axis(None, axis=1)
pro3 = pro1.dropna(thresh=1000, axis=1)
FEATURES_ALL = pro3.iloc[:, 3:].columns.tolist()
FEATURES_DLE = list(set(FEATURES_ALL) - set(FEATURES_PPI))
for i in FEATURES_ALL:
    pro3.loc[:, i] = pro3.loc[:, i].fillna(pro3.loc[:, i].median())
pro4 = pro3.dropna()
pro5 = pro4.copy()
for i in FEATURES_ALL:
    pro5.loc[:, i] = (pro4.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[:, i].std(axis=0)
pro6 = train.merge(pro5, how='left', on='visit_id').dropna(subset=['Q92823']).rename(columns={'patient_id_x': 'patient_id', 'visit_month_x': 'visit_month'})
pro6.drop(FEATURES_DLE, axis=1, inplace=True)
finaldata = pro6
finaldata = finaldata.dropna()
finaldata = finaldata.reset_index()
finaldata.visit_month = finaldata.visit_month.astype('float')
finaldata
target = ['updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']
x = finaldata[FEATURES_PPI]
y = finaldata[target]
adjoint1 = np.array(adjoint)[np.newaxis, :, :]
adjoint2 = np.repeat(adjoint1, len(finaldata), 0)
x1 = x.values[:, :, np.newaxis]
from spektral.layers import GCNConv
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Dropout, Dense, Reshape, GlobalMaxPool1D, MaxPool1D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from spektral.layers import GCNConv
from spektral.utils import normalized_adjacency
import tensorflow.keras.backend as K
tf.keras.utils.set_random_seed(1234)
tf.random.set_seed(1234)
def smape_loss(y_true, y_pred):
    epsilon = 0.1
    y_true = y_true + 1
    y_pred = y_pred + 1
    numer = K.abs(y_pred - y_true)
    denom = K.maximum(K.abs(y_true) + K.abs(y_pred) + epsilon, 0.5 + epsilon)
    smape = numer / (denom / 2) * 100
    smape = tf.where(tf.math.is_nan(smape), tf.zeros_like(smape), smape)
    return smape
def calculate_smape(y_true, y_pred):
    y_true, y_pred = (np.array(y_true), np.array(y_pred))
    numer = np.round(np.abs(y_pred - y_true), 0)
    denom = np.round(np.abs(y_true) + np.abs(y_pred), 0)
    return 1 / len(y_true) * np.sum(np.nan_to_num(numer / (denom / 2))) * 100
def build_model():
    X_in = Input(shape=(len(adjoint), 1))
    A_in = Input((len(adjoint), len(adjoint)), sparse=True)
    X_1 = GCNConv(116, activation='relu')([X_in, A_in])
    X_2 = GlobalMaxPool1D()(X_1)
def build_model():
    X_in = Input(shape=(len(adjoint), 1))
    A_in = Input((len(adjoint), len(adjoint)), sparse=True)
    X_1 = GCNConv(256, activation='relu')([X_in, A_in])
    X_2 = GlobalMaxPool1D()(X_1)
    X_3 = Dense(256, activation='relu')(X_2)
    X_3 = Dropout(0.3)(X_3)
    X_4 = Dense(256, activation='relu')(X_3)
    X_5 = Dense(150, activation='relu')(X_4)
    X_6 = Dense(150, activation='relu')(X_5)
    X_6 = Dropout(0.3)(X_6)
    X_7 = Dense(128, activation='relu')(X_6)
    X_7 = Dropout(0.3)(X_7)
    X_8 = Dense(128, activation='relu')(X_7)
    X_8 = Dropout(0.3)(X_8)
    output = Dense(4, activation='linear')(X_7)
    model = Model(inputs=[X_in, A_in], outputs=output)
    return model
model = build_model()
optimizer = Adam(learning_rate=0.005)
early_stopping = EarlyStopping(patience=10, restore_best_weights=True)
model.compile(optimizer=optimizer, loss=smape_loss)
model.summary()
code
129001574/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import dendrogram, linkage
import warnings
warnings.filterwarnings('ignore')
from scipy.cluster.hierarchy import dendrogram
from sklearn.cluster import AgglomerativeClustering
from matplotlib import pyplot as plt
pro = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
train = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
test_proteins = pd.read_csv('/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_proteins.csv')
pro1 = pro.pivot(index=['visit_id', 'visit_month', 'patient_id'], columns='UniProt', values='NPX').reset_index().rename_axis(None, axis=1)
pro3 = pro1.dropna(thresh=1000, axis=1)
FEATURES_ALL = pro3.iloc[:, 3:].columns.tolist()
pro4 = pro3.dropna()
pro5 = pro4.copy()
for i in FEATURES_ALL:
    pro5.loc[:, i] = (pro4.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[:, i].std(axis=0)
pro6 = train.merge(pro5, how='left', on='visit_id').dropna(subset=['Q92823']).rename(columns={'patient_id_x': 'patient_id', 'visit_month_x': 'visit_month'})
pro6
pro7 = pro6[pro6['visit_month'] <= 36]
FEATURES_PPI = ['O00584', 'O14773', 'O60888', 'O94919', 'P00441', 'P00734', 'P00736', 'P00738', 'P00746', 'P00747', 'P00748', 'P00751', 'P01008', 'P01009', 'P01011', 'P01023', 'P01024', 'P01033', 'P01344', 'P01621', 'P01717', 'P01834', 'P01857', 'P01859', 'P01860', 'P01876', 'P01877', 'P02452', 'P02652', 'P02656', 'P02679', 'P02747', 'P02749', 'P02750', 'P02751', 'P02753', 'P02760', 'P02763', 'P02765', 'P02766', 'P02768', 'P02787', 'P02790', 'P04004', 'P04156', 'P04180', 'P04196', 'P04207', 'P04211', 'P04216', 'P04217', 'P04275', 'P04433', 'P05067', 'P05090', 'P05155', 'P05156', 'P05546', 'P06681', 'P06727', 'P07195', 'P07225', 'P07339', 'P07711', 'P07858', 'P07998', 'P08294', 'P08493', 'P08697', 'P09486', 'P10451', 'P10643', 'P13521', 'P13611', 'P16070', 'P18065', 'P19652', 'P19823', 'P24592', 'P25311', 'P35542', 'P36222', 'P39060', 'P40925', 'P41222', 'P43251', 'P43652', 'P49908', 'P51884', 'P54289', 'P61626', 'P80748', 'P98160', 'Q13283', 'Q13451', 'Q14118', 'Q14508', 'Q14515', 'Q14624', 'Q16270', 'Q16610', 'Q6UXB8', 'Q7Z5P9', 'Q8IWV7', 'Q8N2S1', 'Q8NBJ4', 'Q92520', 'Q92876', 'Q96PD5', 'Q9BY67', 'Q9NQ79', 'Q9Y646', 'Q9Y6R7']
SUPPLYMENT_FEATURE = ['P00450', 'P10451', 'P01033', 'P01008', 'P02647', 'P01024', 'Q92876']
FEATURES_PPI = list(set(SUPPLYMENT_FEATURE) | set(FEATURES_PPI))
data = pro7[FEATURES_PPI]
corr = data.corr()
corr
adjoint = corr
adjoint
pro1 = pro.pivot(index=['visit_id', 'visit_month', 'patient_id'], columns='UniProt', values='NPX').reset_index().rename_axis(None, axis=1)
pro3 = pro1.dropna(thresh=1000, axis=1)
FEATURES_ALL = pro3.iloc[:, 3:].columns.tolist()
FEATURES_DLE = list(set(FEATURES_ALL) - set(FEATURES_PPI))
for i in FEATURES_ALL:
    pro3.loc[:, i] = pro3.loc[:, i].fillna(pro3.loc[:, i].median())
pro4 = pro3.dropna()
pro5 = pro4.copy()
for i in FEATURES_ALL:
    pro5.loc[:, i] = (pro4.loc[:, i] - pro4.loc[:, i].mean(axis=0)) / pro4.loc[:, i].std(axis=0)
pro6 = train.merge(pro5, how='left', on='visit_id').dropna(subset=['Q92823']).rename(columns={'patient_id_x': 'patient_id', 'visit_month_x': 'visit_month'})
pro6.drop(FEATURES_DLE, axis=1, inplace=True)
finaldata = pro6
finaldata = finaldata.dropna()
finaldata = finaldata.reset_index()
finaldata.visit_month = finaldata.visit_month.astype('float')
finaldata
target = ['updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']
x = finaldata[FEATURES_PPI]
y = finaldata[target]
print(x.shape)
print(y.shape)
code
73079164/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73079164/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import OneHotEncoder
from xgboost import XGBRegressor
import optuna
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', index_col='id')
data_submission = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', index_col='id')
object_cols = [col for col in data.columns if data[col].dtype == 'object']
num_cols = [col for col in data.columns if data[col].dtype != 'object' and col != 'target']
from sklearn.preprocessing import OneHotEncoder
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(X_train[object_cols]))
OH_cols_valid = pd.DataFrame(OH_encoder.transform(X_valid[object_cols]))
OH_cols_train.index = X_train.index
OH_cols_valid.index = X_valid.index
num_X_train = X_train.drop(object_cols, axis=1)
num_X_valid = X_valid.drop(object_cols, axis=1)
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_cols_valid], axis=1)
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_error
def optimize_stuff(trial):
    learning_rate = trial.suggest_float('learning_rate', 0.01, 0.25, log=True)
    reg_lambda = trial.suggest_loguniform('reg_lambda', 1e-08, 100.0)
    reg_alpha = trial.suggest_loguniform('reg_alpha', 1e-08, 100.0)
    subsample = trial.suggest_float('subsample', 0.1, 1.0)
    colsample_bytree = trial.suggest_float('colsample_bytree', 0.1, 1.0)
    max_depth = trial.suggest_int('max_depth', 1, 7)
    my_model = XGBRegressor(random_state=1, tree_method='gpu_hist', gpu_id=0, predictor='gpu_predictor', n_estimators=7000, learning_rate=learning_rate, reg_lambda=reg_lambda, reg_alpha=reg_alpha, subsample=subsample, colsample_bytree=colsample_bytree, max_depth=max_depth)
    my_model.fit(OH_X_train, y_train, early_stopping_rounds=300, eval_set=[(OH_X_valid, y_valid)], verbose=False)
    preds_valid = my_model.predict(OH_X_valid)
    rmse = mean_squared_error(y_valid, preds_valid, squared=False)
    return rmse
import optuna
study = optuna.create_study(direction='minimize')
study.optimize(optimize_stuff, n_trials=1000)
best_params = study.best_params
code
17115291/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/WorldCups.csv')
data.rename(columns={'Runners-Up': 'RunnersUp'}, inplace=True)
for i in range(len(data.Attendance)):
    data.Attendance[i] = data.Attendance[i].replace('.', '')
data.Attendance = pd.to_numeric(data.Attendance)
data.corr()
f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(data.corr(), annot=True, linewidths=0.5, fmt='.4f', ax=ax)
plt.show()
code
17115291/cell_25
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/WorldCups.csv')
data.rename(columns={'Runners-Up': 'RunnersUp'}, inplace=True)
for i in range(len(data.Attendance)):
    data.Attendance[i] = data.Attendance[i].replace('.', '')
data.Attendance = pd.to_numeric(data.Attendance)
data.corr()
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(data.corr(), annot=True, linewidths=.5, fmt= '.4f',ax=ax)
plt.show()
data.columns
data['AverageGoal'] = data.GoalsScored / data.MatchesPlayed
ax = plt.gca()
pd.Series(data.Winner).replace('Germany FR', 'Germany').value_counts().plot('bar', grid=True)
plt.show()
code
17115291/cell_20
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/WorldCups.csv')
data.rename(columns={'Runners-Up': 'RunnersUp'}, inplace=True)
for i in range(len(data.Attendance)):
    data.Attendance[i] = data.Attendance[i].replace('.', '')
data.Attendance = pd.to_numeric(data.Attendance)
data.corr()
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(data.corr(), annot=True, linewidths=.5, fmt= '.4f',ax=ax)
plt.show()
data.columns
data['AverageGoal'] = data.GoalsScored / data.MatchesPlayed
data.plot(kind='scatter', x='Year', y='AverageGoal', alpha=0.8, color='blue', figsize=(6, 6))
plt.legend()
plt.xlabel('Year')
plt.ylabel('AverageGoal')
plt.title('Scatter Plot')
plt.show()
code
17115291/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
print(os.listdir('../input'))
code
17115291/cell_7
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/WorldCups.csv')
data.rename(columns={'Runners-Up': 'RunnersUp'}, inplace=True)
data
code
17115291/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/WorldCups.csv')
data.rename(columns={'Runners-Up': 'RunnersUp'}, inplace=True)
for i in range(len(data.Attendance)):
    data.Attendance[i] = data.Attendance[i].replace('.', '')
data.Attendance = pd.to_numeric(data.Attendance)
data.corr()
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(data.corr(), annot=True, linewidths=.5, fmt= '.4f',ax=ax)
plt.show()
data.columns
code
17115291/cell_8
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/WorldCups.csv')
data.rename(columns={'Runners-Up': 'RunnersUp'}, inplace=True)
for i in range(len(data.Attendance)):
    data.Attendance[i] = data.Attendance[i].replace('.', '')
code
17115291/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/WorldCups.csv')
data.rename(columns={'Runners-Up': 'RunnersUp'}, inplace=True)
for i in range(len(data.Attendance)):
    data.Attendance[i] = data.Attendance[i].replace('.', '')
data.Attendance = pd.to_numeric(data.Attendance)
data.corr()
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(data.corr(), annot=True, linewidths=.5, fmt= '.4f',ax=ax)
plt.show()
data.plot(kind='scatter', x='Year', y='GoalsScored', alpha=0.8, color='blue', figsize=(6, 6))
plt.legend()
plt.xlabel('Year')
plt.ylabel('GoalsScored')
plt.title('Scatter Plot')
plt.show()
code
17115291/cell_22
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/WorldCups.csv')
data.rename(columns={'Runners-Up': 'RunnersUp'}, inplace=True)
for i in range(len(data.Attendance)):
    data.Attendance[i] = data.Attendance[i].replace('.', '')
data.Attendance = pd.to_numeric(data.Attendance)
data.corr()
f,ax = plt.subplots(figsize=(10, 10))
sns.heatmap(data.corr(), annot=True, linewidths=.5, fmt= '.4f',ax=ax)
plt.show()
data.columns
data['AverageGoal'] = data.GoalsScored / data.MatchesPlayed
ax = plt.gca()
data.plot(kind='line', x='Year', y='GoalsScored', color='green', ax=ax, grid=True, figsize=(7, 7))
data.plot(kind='line', x='Year', y='MatchesPlayed', color='red', ax=ax, grid=True)
data.plot(kind='line', x='Year', y='QualifiedTeams', color='b', ax=ax, grid=True)
plt.legend(loc='upper left')
plt.show()
code
17115291/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/WorldCups.csv')
data.rename(columns={'Runners-Up': 'RunnersUp'}, inplace=True)
for i in range(len(data.Attendance)):
    data.Attendance[i] = data.Attendance[i].replace('.', '')
data.Attendance = pd.to_numeric(data.Attendance)
data.info()
code
17115291/cell_12
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/WorldCups.csv')
data.rename(columns={'Runners-Up': 'RunnersUp'}, inplace=True)
for i in range(len(data.Attendance)):
    data.Attendance[i] = data.Attendance[i].replace('.', '')
data.Attendance = pd.to_numeric(data.Attendance)
data.corr()
code
17115291/cell_5
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/WorldCups.csv')
data.info()
code
72112989/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
iris = load_iris()
x = pd.DataFrame(data=iris.data, columns=iris.feature_names)
y = iris.target
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(random_state=0)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
pred = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
pred.to_csv('result.csv', index=False)
pred
code
72112989/cell_9
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_iris
import pandas as pd
iris = load_iris()
x = pd.DataFrame(data=iris.data, columns=iris.feature_names)
y = iris.target
x.isnull().sum()
x.describe()
code
72112989/cell_25
[ "text_html_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix,classification_report
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(random_state=0)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
y_train_pred = model.predict(x_train)
print(accuracy_score(y_train, y_train_pred) * 100)
code
72112989/cell_26
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix,classification_report
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(random_state=0)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print(accuracy_score(y_test, y_pred) * 100)
code
72112989/cell_11
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_iris
import pandas as pd
import seaborn as sns
iris = load_iris()
x = pd.DataFrame(data=iris.data, columns=iris.feature_names)
y = iris.target
x.isnull().sum()
sns.relplot(x='petal length (cm)', y='petal width (cm)', data=x)
code
72112989/cell_7
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_iris
import pandas as pd
iris = load_iris()
x = pd.DataFrame(data=iris.data, columns=iris.feature_names)
y = iris.target
x.info()
code
72112989/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(random_state=0)
model.fit(x_train, y_train)
code
72112989/cell_32
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import pandas as pd
iris = load_iris()
x = pd.DataFrame(data=iris.data, columns=iris.feature_names)
y = iris.target
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(random_state=0)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
pred = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
pred.to_csv('result.csv', index=False)
y_train_pred = model.predict(x_train)
data = []
for i in range(4):
    n = float(input())
    data.append(n)
data = np.reshape(data, (1, len(data)))
pred = model.predict(data)
if pred == 0:
    print('Iris-setosa')
elif pred == 1:
    print('Iris-versicolor')
elif pred == 2:
    print('Iris-virginica')
else:
    pass
code
72112989/cell_28
[ "text_html_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix,classification_report
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(random_state=0)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print(classification_report(y_test, y_pred))
code
72112989/cell_8
[ "image_output_1.png" ]
from sklearn.datasets import load_iris
import pandas as pd
iris = load_iris()
x = pd.DataFrame(data=iris.data, columns=iris.feature_names)
y = iris.target
x.isnull().sum()
code
72112989/cell_15
[ "text_html_output_1.png" ]
print(x_train.shape)
print(x_test.shape)
code
72112989/cell_35
[ "text_plain_output_1.png" ]
from IPython.display import Image
from six import StringIO
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
import numpy as np
import pandas as pd
import pydotplus
iris = load_iris()
x = pd.DataFrame(data=iris.data, columns=iris.feature_names)
y = iris.target
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(random_state=0)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
pred = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
pred.to_csv('result.csv', index=False)
y_train_pred = model.predict(x_train)
data = []
for i in range(4):
    n = float(input())
    data.append(n)
data = np.reshape(data, (1, len(data)))
pred = model.predict(data)
from six import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
dot_data = StringIO()
export_graphviz(model, out_file=dot_data, feature_names=iris.feature_names, filled=True, rounded=True, special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
code
72112989/cell_31
[ "text_plain_output_1.png" ]
import numpy as np
data = []
print('enter specifications: ')
for i in range(4):
    if i == 0:
        print('SepalLengthCm:')
    elif i == 1:
        print('SepalWidthCm:')
    elif i == 2:
        print('PetalLengthCm:')
    elif i == 3:
        print('PetalWidthCm:')
    n = float(input())
    data.append(n)
data = np.reshape(data, (1, len(data)))
print(data)
code
72112989/cell_10
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_iris
import pandas as pd
import seaborn as sns
iris = load_iris()
x = pd.DataFrame(data=iris.data, columns=iris.feature_names)
y = iris.target
x.isnull().sum()
sns.relplot(x='sepal length (cm)', y='sepal width (cm)', data=x)
code
72112989/cell_27
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix,classification_report
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(random_state=0)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print(confusion_matrix(y_test, y_pred))
code
72112989/cell_37
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
plt.savefig('img.png')
code
72112989/cell_12
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_iris
import pandas as pd
iris = load_iris()
x = pd.DataFrame(data=iris.data, columns=iris.feature_names)
y = iris.target
x.isnull().sum()
x.corr()
code
72112989/cell_5
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.datasets import load_iris
import pandas as pd
iris = load_iris()
x = pd.DataFrame(data=iris.data, columns=iris.feature_names)
print(x.head())
y = iris.target
print(y)
code
1005915/cell_4
[ "text_html_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objs as go
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
import plotly.plotly as py
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode(connected=True)
terror_data = pd.read_csv('../input/globalterrorismdb_0616dist.csv', encoding='ISO-8859-1', usecols=[0, 1, 2, 3, 8, 11, 13, 14, 35, 84, 100, 103])
terror_data = terror_data.rename(columns={'eventid': 'id', 'iyear': 'year', 'imonth': 'month', 'iday': 'day', 'country_txt': 'country', 'provstate': 'state', 'targtype1_txt': 'target', 'weaptype1_txt': 'weapon', 'nkill': 'fatalities', 'nwound': 'injuries'})
terror_data['fatalities'] = terror_data['fatalities'].fillna(0).astype(int)
terror_data['injuries'] = terror_data['injuries'].fillna(0).astype(int)
attacks_france = terror_data[terror_data.country == 'France']
terror_peryear = np.asarray(attacks_france.groupby('year').year.count())
terror_years = np.arange(1970, 2016)
terror_years = np.delete(terror_years, [23])
trace = [go.Scatter(x=terror_years, y=terror_peryear, mode='lines', line=dict(color='rgb(240, 140, 45)', width=3))]
layout = go.Layout(title='Terrorist Attacks by Year in France (1970-2015)', xaxis=dict(rangeslider=dict(thickness=0.05), showline=True, showgrid=False), yaxis=dict(range=[0.1, 425], showline=True, showgrid=False))
figure = dict(data=trace, layout=layout)
iplot(figure)
code
1005915/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
1005915/cell_3
[ "text_html_output_2.png" ]
from plotly.offline import iplot, init_notebook_mode
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
import plotly.plotly as py
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode(connected=True)
terror_data = pd.read_csv('../input/globalterrorismdb_0616dist.csv', encoding='ISO-8859-1', usecols=[0, 1, 2, 3, 8, 11, 13, 14, 35, 84, 100, 103])
terror_data = terror_data.rename(columns={'eventid': 'id', 'iyear': 'year', 'imonth': 'month', 'iday': 'day', 'country_txt': 'country', 'provstate': 'state', 'targtype1_txt': 'target', 'weaptype1_txt': 'weapon', 'nkill': 'fatalities', 'nwound': 'injuries'})
terror_data['fatalities'] = terror_data['fatalities'].fillna(0).astype(int)
terror_data['injuries'] = terror_data['injuries'].fillna(0).astype(int)
attacks_france = terror_data[terror_data.country == 'France']
attacks_france.head()
code
74056627/cell_21
[ "text_plain_output_1.png" ]
df[0] = df[0].apply(str)
code
74056627/cell_9
[ "text_plain_output_1.png" ]
code
74056627/cell_23
[ "text_plain_output_1.png" ]
df = pd.DataFrame({'a': np.random.rand(10000), 'b': np.random.rand(10000)})
code
74056627/cell_33
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
s = pd.Series(range(10000))
# Memory saving function credit to https://www.kaggle.com/gemartin/load-data-reduce-memory-usage
def reduce_mem_usage(df):
    """ iterate through all the columns of a dataframe and modify the data type to reduce memory usage. """
    start_mem = df.memory_usage().sum() / 1024**2
    print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
    for col in df.columns:
        col_type = df[col].dtype.name
        if col_type not in ['object', 'category', 'datetime64[ns, UTC]']:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024**2
    print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
    print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df
x = reduce_mem_usage(df)
np.allclose(s.apply(relu), vect_relu(s))
code
74056627/cell_20
[ "text_plain_output_1.png" ]
df = pd.DataFrame(pd.date_range(start='1/1/2000', end='1/08/2018'))
code
74056627/cell_6
[ "text_plain_output_1.png" ]
code
74056627/cell_29
[ "text_plain_output_1.png" ]
import numpy as np
def reduce_mem_usage(df):
    """ iterate through all the columns of a dataframe and modify the data type to reduce memory usage. """
    start_mem = df.memory_usage().sum() / 1024 ** 2
    print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
    for col in df.columns:
        col_type = df[col].dtype.name
        if col_type not in ['object', 'category', 'datetime64[ns, UTC]']:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            elif c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                df[col] = df[col].astype(np.float16)
            elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                df[col] = df[col].astype(np.float32)
            else:
                df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024 ** 2
    print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
    print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df
x = reduce_mem_usage(df)
code