markdown | code | output | license | path | repo_name
Define the Model

The following example uses a standard conv-net that has 4 layers with dropout and batch normalization between each layer. Note that we are creating the model within a `strategy.scope`.
with strategy.scope():
  model = tf.keras.models.Sequential()

  model.add(tf.keras.layers.BatchNormalization(input_shape=x_train.shape[1:]))
  model.add(tf.keras.layers.Conv2D(64, (5, 5), padding='same', activation='elu'))
  model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
  model.add(tf.keras.layers.Dropout(0.25))

  model.add(tf.keras.layers.BatchNormalization())
  model.add(tf.keras.layers.Conv2D(128, (5, 5), padding='same', activation='elu'))
  model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
  model.add(tf.keras.layers.Dropout(0.25))

  model.add(tf.keras.layers.BatchNormalization())
  model.add(tf.keras.layers.Conv2D(256, (5, 5), padding='same', activation='elu'))
  model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
  model.add(tf.keras.layers.Dropout(0.25))

  model.add(tf.keras.layers.BatchNormalization())
  model.add(tf.keras.layers.Conv2D(512, (5, 5), padding='same', activation='elu'))
  model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
  model.add(tf.keras.layers.Dropout(0.25))

  model.add(tf.keras.layers.Flatten())
  model.add(tf.keras.layers.Dense(256))
  model.add(tf.keras.layers.Activation('elu'))
  model.add(tf.keras.layers.Dropout(0.5))
  model.add(tf.keras.layers.Dense(10))
  model.add(tf.keras.layers.Activation('softmax'))

  model.summary()
_____no_output_____
Apache-2.0
site/en/r2/guide/_tpu.ipynb
christophmeyer/docs
Train on the TPU

To train on the TPU, we can simply call `model.compile` under the strategy scope and then call `model.fit` to start training. In this case, we train for 5 epochs with 60 steps per epoch and run evaluation at the end of the 5 epochs.

It may take a while for training to start, as the data and model have to be transferred to the TPU and compiled before training can begin.
with strategy.scope():
  model.compile(
      optimizer=tf.train.AdamOptimizer(learning_rate=1e-3),
      loss=tf.keras.losses.sparse_categorical_crossentropy,
      metrics=['sparse_categorical_accuracy']
  )
  model.fit(
      x_train, y_train,
      epochs=5,
      steps_per_epoch=60,
      validation_data=(x_test, y_test),
      validation_freq=5,
  )
_____no_output_____
Apache-2.0
site/en/r2/guide/_tpu.ipynb
christophmeyer/docs
Check our results with Inference

Now that we are done training, we can see how well the model can predict fashion categories:
LABEL_NAMES = ['t_shirt', 'trouser', 'pullover', 'dress', 'coat', 'sandal', 'shirt', 'sneaker', 'bag', 'ankle_boots']

from matplotlib import pyplot
%matplotlib inline

def plot_predictions(images, predictions):
  n = images.shape[0]
  nc = int(np.ceil(n / 4))
  f, axes = pyplot.subplots(nc, 4)
  for i in range(nc * 4):
    y = i // 4
    x = i % 4
    axes[x, y].axis('off')
    if i >= n:   # skip unused subplots before indexing into predictions
      continue
    label = LABEL_NAMES[np.argmax(predictions[i])]
    confidence = np.max(predictions[i])
    axes[x, y].imshow(images[i])
    axes[x, y].text(0.5, -1.5, label + ': %.3f' % confidence, fontsize=12)
  pyplot.gcf().set_size_inches(8, 8)

plot_predictions(np.squeeze(x_test[:16]), model.predict(x_test[:16]))
_____no_output_____
Apache-2.0
site/en/r2/guide/_tpu.ipynb
christophmeyer/docs
Forecasting experiments for GEFCOM 2012 Wind Dataset

Install Libs
!pip3 install -U git+https://github.com/PYFTS/pyFTS
!pip3 install -U git+https://github.com/cseveriano/spatio-temporal-forecasting
!pip3 install -U git+https://github.com/cseveriano/evolving_clustering
!pip3 install -U git+https://github.com/cseveriano/fts2image
!pip3 install -U hyperopt
!pip3 install -U pyts

import pandas as pd
import numpy as np
from hyperopt import hp
from spatiotemporal.util import parameter_tuning, sampling
from spatiotemporal.util import experiments as ex
from sklearn.metrics import mean_squared_error
from google.colab import files
import matplotlib.pyplot as plt
import pickle
import math
from pyFTS.benchmarks import Measures
from pyts.decomposition import SingularSpectrumAnalysis
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import datetime
_____no_output_____
MIT
notebooks/thesis_experiments/20200924_eMVFTS_Wind_Energy_Raw.ipynb
cseveriano/spatio-temporal-forecasting
Aux Functions
def normalize(df): mindf = df.min() maxdf = df.max() return (df-mindf)/(maxdf-mindf) def denormalize(norm, _min, _max): return [(n * (_max-_min)) + _min for n in norm] def getRollingWindow(index): pivot = index train_start = pivot.strftime('%Y-%m-%d') pivot = pivot + datetime.timedelta(days=20) train_end = pivot.strftime('%Y-%m-%d') pivot = pivot + datetime.timedelta(days=1) test_start = pivot.strftime('%Y-%m-%d') pivot = pivot + datetime.timedelta(days=6) test_end = pivot.strftime('%Y-%m-%d') return train_start, train_end, test_start, test_end def calculate_rolling_error(cv_name, df, forecasts, order_list): cv_results = pd.DataFrame(columns=['Split', 'RMSE', 'SMAPE']) limit = df.index[-1].strftime('%Y-%m-%d') test_end = "" index = df.index[0] for i in np.arange(len(forecasts)): train_start, train_end, test_start, test_end = getRollingWindow(index) test = df[test_start : test_end] yhat = forecasts[i] order = order_list[i] rmse = Measures.rmse(test.iloc[order:], yhat[:-1]) smape = Measures.smape(test.iloc[order:], yhat[:-1]) res = {'Split' : index.strftime('%Y-%m-%d') ,'RMSE' : rmse, 'SMAPE' : smape} cv_results = cv_results.append(res, ignore_index=True) cv_results.to_csv(cv_name+".csv") index = index + datetime.timedelta(days=7) return cv_results def get_final_forecast(norm_forecasts): forecasts_final = [] for i in np.arange(len(norm_forecasts)): f_raw = denormalize(norm_forecasts[i], min_raw, max_raw) forecasts_final.append(f_raw) return forecasts_final from spatiotemporal.test import methods_space_oahu as ms from spatiotemporal.util import parameter_tuning, sampling from spatiotemporal.util import experiments as ex from sklearn.metrics import mean_squared_error import numpy as np from hyperopt import fmin, tpe, hp, STATUS_OK, Trials from hyperopt import space_eval import traceback from . import sampling import pickle def calculate_error(loss_function, test_df, forecast, offset): error = loss_function(test_df.iloc[(offset):], forecast) print("Error : "+str(error)) return error def method_optimize(experiment, forecast_method, train_df, test_df, space, loss_function, max_evals): def objective(params): print(params) try: _output = list(params['output']) forecast = forecast_method(train_df, test_df, params) _step = params.get('step', 1) offset = params['order'] + _step - 1 error = calculate_error(loss_function, test_df[_output], forecast, offset) except Exception: traceback.print_exc() error = 1000 return {'loss': error, 'status': STATUS_OK} print("Running experiment: " + experiment) trials = Trials() best = fmin(objective, space, algo=tpe.suggest, max_evals=max_evals, trials=trials) print('best parameters: ') print(space_eval(space, best)) pickle.dump(best, open("best_" + experiment + ".pkl", "wb")) pickle.dump(trials, open("trials_" + experiment + ".pkl", "wb")) def run_search(methods, data, train, loss_function, max_evals=100, resample=None): if resample: data = sampling.resample_data(data, resample) train_df, test_df = sampling.train_test_split(data, train) for experiment, method, space in methods: method_optimize(experiment, method, train_df, test_df, space, loss_function, max_evals)
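A quick, illustrative check of the helpers above (a sketch, assuming `getRollingWindow`, `normalize`, and `denormalize` are defined as in the previous cell; the toy series is made up):

```
# Illustrative only: inspect the rolling-window boundaries for one split.
import pandas as pd

start = pd.Timestamp('2009-08-01')
train_start, train_end, test_start, test_end = getRollingWindow(start)
print(train_start, train_end)   # 2009-08-01 2009-08-21 -> 21-day training slice
print(test_start, test_end)     # 2009-08-22 2009-08-28 -> 7-day test slice

# Round trip of the normalization helpers on a toy series.
s = pd.Series([10.0, 15.0, 20.0])
norm = normalize(s)
print(list(norm))                           # [0.0, 0.5, 1.0]
print(denormalize(norm, s.min(), s.max()))  # [10.0, 15.0, 20.0]
```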
_____no_output_____
MIT
notebooks/thesis_experiments/20200924_eMVFTS_Wind_Energy_Raw.ipynb
cseveriano/spatio-temporal-forecasting
Load Dataset
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
from sklearn.metrics import mean_squared_error

# column names
wind_farms = ['wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7']

# read raw dataset
df = pd.read_csv('https://query.data.world/s/3zx2jusk4z6zvlg2dafqgshqp3oao6', parse_dates=['date'], index_col=0)
df.index = pd.to_datetime(df.index, format="%Y%m%d%H")

interval = ((df.index >= '2009-07') & (df.index <= '2010-08'))
df = df.loc[interval]

# Normalize data: save min-max for denormalization
min_raw = df.min()
max_raw = df.max()

# Perform normalization
norm_df = normalize(df)

# Tuning split
tuning_df = norm_df["2009-07-01":"2009-07-31"]

norm_df = norm_df["2009-08-01":"2010-08-30"]
df = df["2009-08-01":"2010-08-30"]
_____no_output_____
MIT
notebooks/thesis_experiments/20200924_eMVFTS_Wind_Energy_Raw.ipynb
cseveriano/spatio-temporal-forecasting
Forecasting Methods

Persistence
def persistence_forecast(train, test, step): predictions = [] for t in np.arange(0,len(test), step): yhat = [test.iloc[t]] * step predictions.extend(yhat) return predictions def rolling_cv_persistence(df, step): forecasts = [] lags_list = [] limit = df.index[-1].strftime('%Y-%m-%d') test_end = "" index = df.index[0] while test_end < limit : print("Index: ", index.strftime('%Y-%m-%d')) train_start, train_end, test_start, test_end = getRollingWindow(index) index = index + datetime.timedelta(days=7) train = df[train_start : train_end] test = df[test_start : test_end] yhat = persistence_forecast(train, test, step) lags_list.append(1) forecasts.append(yhat) return forecasts, lags_list forecasts_raw, order_list = rolling_cv_persistence(norm_df, 1) forecasts_final = get_final_forecast(forecasts_raw) calculate_rolling_error("rolling_cv_wind_raw_persistence", norm_df, forecasts_final, order_list) files.download('rolling_cv_wind_raw_persistence.csv')
_____no_output_____
MIT
notebooks/thesis_experiments/20200924_eMVFTS_Wind_Energy_Raw.ipynb
cseveriano/spatio-temporal-forecasting
VAR
from statsmodels.tsa.api import VAR, DynamicVAR def evaluate_VAR_models(test_name, train, validation,target, maxlags_list): var_results = pd.DataFrame(columns=['Order','RMSE']) best_score, best_cfg, best_model = float("inf"), None, None for lgs in maxlags_list: model = VAR(train) results = model.fit(maxlags=lgs, ic='aic') order = results.k_ar forecast = [] for i in range(len(validation)-order) : forecast.extend(results.forecast(validation.values[i:i+order],1)) forecast_df = pd.DataFrame(columns=validation.columns, data=forecast) rmse = Measures.rmse(validation[target].iloc[order:], forecast_df[target].values) if rmse < best_score: best_score, best_cfg, best_model = rmse, order, results res = {'Order' : str(order) ,'RMSE' : rmse} print('VAR (%s) RMSE=%.3f' % (str(order),rmse)) var_results = var_results.append(res, ignore_index=True) var_results.to_csv(test_name+".csv") print('Best VAR(%s) RMSE=%.3f' % (best_cfg, best_score)) return best_model def var_forecast(train, test, params): order = params['order'] step = params['step'] model = VAR(train.values) results = model.fit(maxlags=order) lag_order = results.k_ar print("Lag order:" + str(lag_order)) forecast = [] for i in np.arange(0,len(test)-lag_order+1,step) : forecast.extend(results.forecast(test.values[i:i+lag_order],step)) forecast_df = pd.DataFrame(columns=test.columns, data=forecast) return forecast_df.values, lag_order def rolling_cv_var(df, params): forecasts = [] order_list = [] limit = df.index[-1].strftime('%Y-%m-%d') test_end = "" index = df.index[0] while test_end < limit : print("Index: ", index.strftime('%Y-%m-%d')) train_start, train_end, test_start, test_end = getRollingWindow(index) index = index + datetime.timedelta(days=7) train = df[train_start : train_end] test = df[test_start : test_end] # Concat train & validation for test yhat, lag_order = var_forecast(train, test, params) forecasts.append(yhat) order_list.append(lag_order) return forecasts, order_list params_raw = {'order': 4, 'step': 1} forecasts_raw, order_list = rolling_cv_var(norm_df, params_raw) forecasts_final = get_final_forecast(forecasts_raw) calculate_rolling_error("rolling_cv_wind_raw_var", df, forecasts_final, order_list) files.download('rolling_cv_wind_raw_var.csv')
_____no_output_____
MIT
notebooks/thesis_experiments/20200924_eMVFTS_Wind_Energy_Raw.ipynb
cseveriano/spatio-temporal-forecasting
e-MVFTS
from spatiotemporal.models.clusteredmvfts.fts import evolvingclusterfts def evolvingfts_forecast(train_df, test_df, params, train_model=True): _variance_limit = params['variance_limit'] _defuzzy = params['defuzzy'] _t_norm = params['t_norm'] _membership_threshold = params['membership_threshold'] _order = params['order'] _step = params['step'] model = evolvingclusterfts.EvolvingClusterFTS(variance_limit=_variance_limit, defuzzy=_defuzzy, t_norm=_t_norm, membership_threshold=_membership_threshold) model.fit(train_df.values, order=_order, verbose=False) forecast = model.predict(test_df.values, steps_ahead=_step) forecast_df = pd.DataFrame(data=forecast, columns=test_df.columns) return forecast_df.values def rolling_cv_evolving(df, params): forecasts = [] order_list = [] limit = df.index[-1].strftime('%Y-%m-%d') test_end = "" index = df.index[0] first_time = True while test_end < limit : print("Index: ", index.strftime('%Y-%m-%d')) train_start, train_end, test_start, test_end = getRollingWindow(index) index = index + datetime.timedelta(days=7) train = df[train_start : train_end] test = df[test_start : test_end] # Concat train & validation for test yhat = list(evolvingfts_forecast(train, test, params, train_model=first_time)) #yhat.append(yhat[-1]) #para manter o formato do vetor de metricas forecasts.append(yhat) order_list.append(params['order']) first_time = False return forecasts, order_list params_raw = {'variance_limit': 0.001, 'order': 2, 'defuzzy': 'weighted', 't_norm': 'threshold', 'membership_threshold': 0.6, 'step':1} forecasts_raw, order_list = rolling_cv_evolving(norm_df, params_raw) forecasts_final = get_final_forecast(forecasts_raw) calculate_rolling_error("rolling_cv_wind_raw_emvfts", df, forecasts_final, order_list) files.download('rolling_cv_wind_raw_emvfts.csv')
_____no_output_____
MIT
notebooks/thesis_experiments/20200924_eMVFTS_Wind_Energy_Raw.ipynb
cseveriano/spatio-temporal-forecasting
MLP
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.constraints import maxnorm
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization

# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    n_vars = 1 if type(data) is list else data.shape[1]
    df = pd.DataFrame(data)
    cols, names = list(), list()
    # input sequence (t-n, ... t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
    # forecast sequence (t, t+1, ... t+n)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
        else:
            names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
    # put it all together
    agg = pd.concat(cols, axis=1)
    agg.columns = names
    # drop rows with NaN values
    if dropnan:
        agg.dropna(inplace=True)
    return agg
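To see what `series_to_supervised` produces, here is a small illustration on a hypothetical two-column frame (not the wind dataset): lagged inputs appear on the left, targets on the right.

```
# Toy example of series_to_supervised (hypothetical data, for illustration only).
import pandas as pd

toy = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [10, 20, 30, 40]})
print(series_to_supervised(toy, n_in=1, n_out=1))
#    var1(t-1)  var2(t-1)  var1(t)  var2(t)
# 1        1.0       10.0        2       20
# 2        2.0       20.0        3       30
# 3        3.0       30.0        4       40
```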
_____no_output_____
MIT
notebooks/thesis_experiments/20200924_eMVFTS_Wind_Energy_Raw.ipynb
cseveriano/spatio-temporal-forecasting
MLP Parameter Tuning
from spatiotemporal.util import parameter_tuning, sampling from spatiotemporal.util import experiments as ex from sklearn.metrics import mean_squared_error from hyperopt import hp import numpy as np mlp_space = {'choice': hp.choice('num_layers', [ {'layers': 'two', }, {'layers': 'three', 'units3': hp.choice('units3', [8, 16, 64, 128, 256, 512]), 'dropout3': hp.choice('dropout3', [0, 0.25, 0.5, 0.75]) } ]), 'units1': hp.choice('units1', [8, 16, 64, 128, 256, 512]), 'units2': hp.choice('units2', [8, 16, 64, 128, 256, 512]), 'dropout1': hp.choice('dropout1', [0, 0.25, 0.5, 0.75]), 'dropout2': hp.choice('dropout2', [0, 0.25, 0.5, 0.75]), 'batch_size': hp.choice('batch_size', [28, 64, 128, 256, 512]), 'order': hp.choice('order', [1, 2, 3]), 'input': hp.choice('input', [wind_farms]), 'output': hp.choice('output', [wind_farms]), 'epochs': hp.choice('epochs', [100, 200, 300])} def mlp_tuning(train_df, test_df, params): _input = list(params['input']) _nlags = params['order'] _epochs = params['epochs'] _batch_size = params['batch_size'] nfeat = len(train_df.columns) nsteps = params.get('step',1) nobs = _nlags * nfeat output_index = -nfeat*nsteps train_reshaped_df = series_to_supervised(train_df[_input], n_in=_nlags, n_out=nsteps) train_X, train_Y = train_reshaped_df.iloc[:, :nobs].values, train_reshaped_df.iloc[:, output_index:].values test_reshaped_df = series_to_supervised(test_df[_input], n_in=_nlags, n_out=nsteps) test_X, test_Y = test_reshaped_df.iloc[:, :nobs].values, test_reshaped_df.iloc[:, output_index:].values # design network model = Sequential() model.add(Dense(params['units1'], input_dim=train_X.shape[1], activation='relu')) model.add(Dropout(params['dropout1'])) model.add(BatchNormalization()) model.add(Dense(params['units2'], activation='relu')) model.add(Dropout(params['dropout2'])) model.add(BatchNormalization()) if params['choice']['layers'] == 'three': model.add(Dense(params['choice']['units3'], activation='relu')) model.add(Dropout(params['choice']['dropout3'])) model.add(BatchNormalization()) model.add(Dense(train_Y.shape[1], activation='sigmoid')) model.compile(loss='mse', optimizer='adam') # includes the call back object model.fit(train_X, train_Y, epochs=_epochs, batch_size=_batch_size, verbose=False, shuffle=False) # predict the test set forecast = model.predict(test_X, verbose=False) return forecast methods = [] methods.append(("EXP_OAHU_MLP", mlp_tuning, mlp_space)) train_split = 0.6 run_search(methods, tuning_df, train_split, Measures.rmse, max_evals=30, resample=None)
Running experiment: EXP_OAHU_MLP {'batch_size': 256, 'choice': {'layers': 'two'}, 'dropout1': 0, 'dropout2': 0.25, 'epochs': 300, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 3, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 16, 'units2': 512} Error : 0.11210207774258987 {'batch_size': 64, 'choice': {'dropout3': 0.75, 'layers': 'three', 'units3': 8}, 'dropout1': 0.75, 'dropout2': 0.75, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 1, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 64, 'units2': 8} Error : 0.16887562719906232 {'batch_size': 512, 'choice': {'dropout3': 0.5, 'layers': 'three', 'units3': 128}, 'dropout1': 0.5, 'dropout2': 0.5, 'epochs': 300, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 1, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 64, 'units2': 8} Error : 0.16832074683739862 {'batch_size': 28, 'choice': {'dropout3': 0, 'layers': 'three', 'units3': 256}, 'dropout1': 0.25, 'dropout2': 0, 'epochs': 300, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 64, 'units2': 64} Error : 0.12007328735895494 {'batch_size': 28, 'choice': {'dropout3': 0.5, 'layers': 'three', 'units3': 256}, 'dropout1': 0.75, 'dropout2': 0.25, 'epochs': 300, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 3, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 512, 'units2': 16} Error : 0.11256583928262713 {'batch_size': 256, 'choice': {'dropout3': 0.25, 'layers': 'three', 'units3': 64}, 'dropout1': 0.5, 'dropout2': 0.5, 'epochs': 300, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 3, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 512, 'units2': 16} Error : 0.14391026899955472 {'batch_size': 256, 'choice': {'dropout3': 0.75, 'layers': 'three', 'units3': 64}, 'dropout1': 0, 'dropout2': 0, 'epochs': 300, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 1, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 16, 'units2': 64} Error : 0.11037676055120181 {'batch_size': 512, 'choice': {'dropout3': 0.75, 'layers': 'three', 'units3': 128}, 'dropout1': 0.25, 'dropout2': 0.25, 'epochs': 300, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 1, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 256, 'units2': 512} Error : 0.15784381475268033 {'batch_size': 512, 'choice': {'dropout3': 0.75, 'layers': 'three', 'units3': 256}, 'dropout1': 0.75, 'dropout2': 0, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 1, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 128, 'units2': 8} Error : 0.16657000728035204 {'batch_size': 512, 'choice': {'layers': 'two'}, 'dropout1': 0.75, 'dropout2': 0.25, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 3, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 512, 'units2': 8} Error : 0.26202963425973014 {'batch_size': 64, 'choice': {'layers': 'two'}, 'dropout1': 0.25, 'dropout2': 0.5, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 512, 'units2': 64} Error : 0.08758667541932756 {'batch_size': 28, 'choice': {'dropout3': 0, 'layers': 'three', 'units3': 256}, 'dropout1': 
0.5, 'dropout2': 0.75, 'epochs': 100, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 1, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 64, 'units2': 16} Error : 0.139826483409004 {'batch_size': 128, 'choice': {'layers': 'two'}, 'dropout1': 0.5, 'dropout2': 0.75, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 16, 'units2': 256} Error : 0.12880869981278525 {'batch_size': 128, 'choice': {'dropout3': 0.25, 'layers': 'three', 'units3': 8}, 'dropout1': 0, 'dropout2': 0.75, 'epochs': 100, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 3, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 512, 'units2': 64} Error : 0.16604021900218402 {'batch_size': 128, 'choice': {'layers': 'two'}, 'dropout1': 0, 'dropout2': 0.5, 'epochs': 300, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 1, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 128, 'units2': 256} Error : 0.09555621269300194 {'batch_size': 256, 'choice': {'dropout3': 0.75, 'layers': 'three', 'units3': 64}, 'dropout1': 0.75, 'dropout2': 0.25, 'epochs': 300, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 8, 'units2': 8} Error : 0.1711557976639845 {'batch_size': 28, 'choice': {'layers': 'two'}, 'dropout1': 0.75, 'dropout2': 0, 'epochs': 100, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 3, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 8, 'units2': 64} Error : 0.1638326118189065 {'batch_size': 256, 'choice': {'layers': 'two'}, 'dropout1': 0.25, 'dropout2': 0, 'epochs': 100, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 1, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 8, 'units2': 512} Error : 0.15831764665590864 {'batch_size': 256, 'choice': {'layers': 'two'}, 'dropout1': 0.5, 'dropout2': 0.75, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 256, 'units2': 256} Error : 0.14529388682505784 {'batch_size': 64, 'choice': {'dropout3': 0.25, 'layers': 'three', 'units3': 512}, 'dropout1': 0.25, 'dropout2': 0.75, 'epochs': 300, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 1, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 8, 'units2': 8} Error : 0.1414119809552915 {'batch_size': 64, 'choice': {'layers': 'two'}, 'dropout1': 0, 'dropout2': 0.5, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 128, 'units2': 128} Error : 0.09542121366565244 {'batch_size': 64, 'choice': {'layers': 'two'}, 'dropout1': 0.25, 'dropout2': 0.5, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 128, 'units2': 128} Error : 0.08515883577119714 {'batch_size': 64, 'choice': {'layers': 'two'}, 'dropout1': 0.25, 'dropout2': 0.5, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 128, 'units2': 128} Error : 0.084967455912928 {'batch_size': 64, 'choice': {'layers': 'two'}, 'dropout1': 0.25, 
'dropout2': 0.5, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 128, 'units2': 128} Error : 0.08816597673392379 {'batch_size': 64, 'choice': {'layers': 'two'}, 'dropout1': 0.25, 'dropout2': 0.5, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 128, 'units2': 128} Error : 0.08461966850490099 {'batch_size': 64, 'choice': {'layers': 'two'}, 'dropout1': 0.25, 'dropout2': 0.5, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 128, 'units2': 128} Error : 0.08416671260635603 {'batch_size': 64, 'choice': {'layers': 'two'}, 'dropout1': 0.25, 'dropout2': 0.5, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 128, 'units2': 128} Error : 0.08203448953925911 {'batch_size': 64, 'choice': {'layers': 'two'}, 'dropout1': 0.25, 'dropout2': 0.5, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 128, 'units2': 128} Error : 0.09141701084487909 {'batch_size': 64, 'choice': {'layers': 'two'}, 'dropout1': 0.25, 'dropout2': 0.5, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 128, 'units2': 128} Error : 0.08625258845773652 {'batch_size': 64, 'choice': {'layers': 'two'}, 'dropout1': 0.25, 'dropout2': 0.5, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 256, 'units2': 128} Error : 0.0846710829000828 100%|██████████| 30/30 [02:15<00:00, 4.52s/trial, best loss: 0.08203448953925911] best parameters: {'batch_size': 64, 'choice': {'layers': 'two'}, 'dropout1': 0.25, 'dropout2': 0.5, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 128, 'units2': 128}
MIT
notebooks/thesis_experiments/20200924_eMVFTS_Wind_Energy_Raw.ipynb
cseveriano/spatio-temporal-forecasting
MLP Forecasting
def mlp_multi_forecast(train_df, test_df, params): nfeat = len(train_df.columns) nlags = params['order'] nsteps = params.get('step',1) nobs = nlags * nfeat output_index = -nfeat*nsteps train_reshaped_df = series_to_supervised(train_df, n_in=nlags, n_out=nsteps) train_X, train_Y = train_reshaped_df.iloc[:, :nobs].values, train_reshaped_df.iloc[:, output_index:].values test_reshaped_df = series_to_supervised(test_df, n_in=nlags, n_out=nsteps) test_X, test_Y = test_reshaped_df.iloc[:, :nobs].values, test_reshaped_df.iloc[:, output_index:].values # design network model = designMLPNetwork(train_X.shape[1], train_Y.shape[1], params) # fit network model.fit(train_X, train_Y, epochs=500, batch_size=1000, verbose=False, shuffle=False) forecast = model.predict(test_X) # fcst = [f[0] for f in forecast] fcst = forecast return fcst def designMLPNetwork(input_shape, output_shape, params): model = Sequential() model.add(Dense(params['units1'], input_dim=input_shape, activation='relu')) model.add(Dropout(params['dropout1'])) model.add(BatchNormalization()) model.add(Dense(params['units2'], activation='relu')) model.add(Dropout(params['dropout2'])) model.add(BatchNormalization()) if params['choice']['layers'] == 'three': model.add(Dense(params['choice']['units3'], activation='relu')) model.add(Dropout(params['choice']['dropout3'])) model.add(BatchNormalization()) model.add(Dense(output_shape, activation='sigmoid')) model.compile(loss='mse', optimizer='adam') return model def rolling_cv_mlp(df, params): forecasts = [] order_list = [] limit = df.index[-1].strftime('%Y-%m-%d') test_end = "" index = df.index[0] while test_end < limit : print("Index: ", index.strftime('%Y-%m-%d')) train_start, train_end, test_start, test_end = getRollingWindow(index) index = index + datetime.timedelta(days=7) train = df[train_start : train_end] test = df[test_start : test_end] # Perform forecast yhat = list(mlp_multi_forecast(train, test, params)) yhat.append(yhat[-1]) #para manter o formato do vetor de metricas forecasts.append(yhat) order_list.append(params['order']) return forecasts, order_list # Enter best params params_raw = {'batch_size': 64, 'choice': {'layers': 'two'}, 'dropout1': 0.25, 'dropout2': 0.5, 'epochs': 200, 'input': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'order': 2, 'output': ('wp1', 'wp2', 'wp3', 'wp4', 'wp5', 'wp6', 'wp7'), 'units1': 128, 'units2': 128} forecasts_raw, order_list = rolling_cv_mlp(norm_df, params_raw) forecasts_final = get_final_forecast(forecasts_raw) calculate_rolling_error("rolling_cv_wind_raw_mlp_multi", df, forecasts_final, order_list) files.download('rolling_cv_wind_raw_mlp_multi.csv')
_____no_output_____
MIT
notebooks/thesis_experiments/20200924_eMVFTS_Wind_Energy_Raw.ipynb
cseveriano/spatio-temporal-forecasting
Granular FTS
from pyFTS.models.multivariate import granular
from pyFTS.partitioners import Grid, Entropy
from pyFTS.models.multivariate import variable
from pyFTS.common import Membership
_____no_output_____
MIT
notebooks/thesis_experiments/20200924_eMVFTS_Wind_Energy_Raw.ipynb
cseveriano/spatio-temporal-forecasting
Granular Parameter Tuning
granular_space = { 'npartitions': hp.choice('npartitions', [100, 150, 200]), 'order': hp.choice('order', [1, 2]), 'knn': hp.choice('knn', [1, 2, 3, 4, 5]), 'alpha_cut': hp.choice('alpha_cut', [0, 0.1, 0.2, 0.3]), 'input': hp.choice('input', [['wp1', 'wp2', 'wp3']]), 'output': hp.choice('output', [['wp1', 'wp2', 'wp3']])} def granular_tuning(train_df, test_df, params): _input = list(params['input']) _output = list(params['output']) _npartitions = params['npartitions'] _order = params['order'] _knn = params['knn'] _alpha_cut = params['alpha_cut'] _step = params.get('step',1) ## create explanatory variables exp_variables = [] for vc in _input: exp_variables.append(variable.Variable(vc, data_label=vc, alias=vc, npart=_npartitions, func=Membership.trimf, data=train_df, alpha_cut=_alpha_cut)) model = granular.GranularWMVFTS(explanatory_variables=exp_variables, target_variable=exp_variables[0], order=_order, knn=_knn) model.fit(train_df[_input], num_batches=1) if _step > 1: forecast = pd.DataFrame(columns=test_df.columns) length = len(test_df.index) for k in range(0,(length -(_order + _step - 1))): fcst = model.predict(test_df[_input], type='multivariate', start_at=k, steps_ahead=_step) forecast = forecast.append(fcst.tail(1)) else: forecast = model.predict(test_df[_input], type='multivariate') return forecast[_output].values methods = [] methods.append(("EXP_WIND_GRANULAR", granular_tuning, granular_space)) train_split = 0.6 run_search(methods, tuning_df, train_split, Measures.rmse, max_evals=10, resample=None)
Running experiment: EXP_WIND_GRANULAR {'alpha_cut': 0.1, 'input': ('wp1', 'wp2', 'wp3'), 'knn': 1, 'npartitions': 100, 'order': 1, 'output': ('wp1', 'wp2', 'wp3')} Error : 0.11669905532137337 {'alpha_cut': 0.2, 'input': ('wp1', 'wp2', 'wp3'), 'knn': 1, 'npartitions': 200, 'order': 2, 'output': ('wp1', 'wp2', 'wp3')} Error : 0.08229067276531199 {'alpha_cut': 0.2, 'input': ('wp1', 'wp2', 'wp3'), 'knn': 2, 'npartitions': 200, 'order': 2, 'output': ('wp1', 'wp2', 'wp3')} Error : 0.08140150942675548 {'alpha_cut': 0.1, 'input': ('wp1', 'wp2', 'wp3'), 'knn': 1, 'npartitions': 200, 'order': 1, 'output': ('wp1', 'wp2', 'wp3')} Error : 0.11527883387924612 {'alpha_cut': 0.2, 'input': ('wp1', 'wp2', 'wp3'), 'knn': 1, 'npartitions': 150, 'order': 1, 'output': ('wp1', 'wp2', 'wp3')} Error : 0.11642857063129212 {'alpha_cut': 0.2, 'input': ('wp1', 'wp2', 'wp3'), 'knn': 3, 'npartitions': 100, 'order': 1, 'output': ('wp1', 'wp2', 'wp3')} Error : 0.10363929653907107 {'alpha_cut': 0.3, 'input': ('wp1', 'wp2', 'wp3'), 'knn': 5, 'npartitions': 200, 'order': 2, 'output': ('wp1', 'wp2', 'wp3')} Error : 0.07916522355127716 {'alpha_cut': 0.3, 'input': ('wp1', 'wp2', 'wp3'), 'knn': 3, 'npartitions': 200, 'order': 2, 'output': ('wp1', 'wp2', 'wp3')} Error : 0.07938399286248478 {'alpha_cut': 0.1, 'input': ('wp1', 'wp2', 'wp3'), 'knn': 2, 'npartitions': 150, 'order': 2, 'output': ('wp1', 'wp2', 'wp3')} Error : 0.08056469602939852 {'alpha_cut': 0.2, 'input': ('wp1', 'wp2', 'wp3'), 'knn': 5, 'npartitions': 100, 'order': 1, 'output': ('wp1', 'wp2', 'wp3')} Error : 0.09920669569870488 100%|██████████| 10/10 [00:09<00:00, 1.05trial/s, best loss: 0.07916522355127716] best parameters: {'alpha_cut': 0.3, 'input': ('wp1', 'wp2', 'wp3'), 'knn': 5, 'npartitions': 200, 'order': 2, 'output': ('wp1', 'wp2', 'wp3')}
MIT
notebooks/thesis_experiments/20200924_eMVFTS_Wind_Energy_Raw.ipynb
cseveriano/spatio-temporal-forecasting
Granular Forecasting
def granular_forecast(train_df, test_df, params): _input = list(params['input']) _output = list(params['output']) _npartitions = params['npartitions'] _knn = params['knn'] _alpha_cut = params['alpha_cut'] _order = params['order'] _step = params.get('step',1) ## create explanatory variables exp_variables = [] for vc in _input: exp_variables.append(variable.Variable(vc, data_label=vc, alias=vc, npart=_npartitions, func=Membership.trimf, data=train_df, alpha_cut=_alpha_cut)) model = granular.GranularWMVFTS(explanatory_variables=exp_variables, target_variable=exp_variables[0], order=_order, knn=_knn) model.fit(train_df[_input], num_batches=1) if _step > 1: forecast = pd.DataFrame(columns=test_df.columns) length = len(test_df.index) for k in range(0,(length -(_order + _step - 1))): fcst = model.predict(test_df[_input], type='multivariate', start_at=k, steps_ahead=_step) forecast = forecast.append(fcst.tail(1)) else: forecast = model.predict(test_df[_input], type='multivariate') return forecast[_output].values def rolling_cv_granular(df, params): forecasts = [] order_list = [] limit = df.index[-1].strftime('%Y-%m-%d') test_end = "" index = df.index[0] while test_end < limit : print("Index: ", index.strftime('%Y-%m-%d')) train_start, train_end, test_start, test_end = getRollingWindow(index) index = index + datetime.timedelta(days=7) train = df[train_start : train_end] test = df[test_start : test_end] # Perform forecast yhat = list(granular_forecast(train, test, params)) yhat.append(yhat[-1]) #para manter o formato do vetor de metricas forecasts.append(yhat) order_list.append(params['order']) return forecasts, order_list def granular_get_final_forecast(forecasts_raw, input): forecasts_final = [] l_min = df[input].min() l_max = df[input].max() for i in np.arange(len(forecasts_raw)): f_raw = denormalize(forecasts_raw[i], l_min, l_max) forecasts_final.append(f_raw) return forecasts_final # Enter best params params_raw = {'alpha_cut': 0.3, 'input': ('wp1', 'wp2', 'wp3'), 'knn': 5, 'npartitions': 200, 'order': 2, 'output': ('wp1', 'wp2', 'wp3')} forecasts_raw, order_list = rolling_cv_granular(norm_df, params_raw) forecasts_final = granular_get_final_forecast(forecasts_raw, list(params_raw['input'])) calculate_rolling_error("rolling_cv_wind_raw_granular", df[list(params_raw['input'])], forecasts_final, order_list) files.download('rolling_cv_wind_raw_granular.csv')
_____no_output_____
MIT
notebooks/thesis_experiments/20200924_eMVFTS_Wind_Energy_Raw.ipynb
cseveriano/spatio-temporal-forecasting
Result Analysis
import pandas as pd from google.colab import files files.upload() def createBoxplot(filename, data, xticklabels, ylabel): # Create a figure instance fig = plt.figure(1, figsize=(9, 6)) # Create an axes instance ax = fig.add_subplot(111) # Create the boxplot bp = ax.boxplot(data, patch_artist=True) ## change outline color, fill color and linewidth of the boxes for box in bp['boxes']: # change outline color box.set( color='#7570b3', linewidth=2) # change fill color box.set( facecolor = '#AACCFF' ) ## change color and linewidth of the whiskers for whisker in bp['whiskers']: whisker.set(color='#7570b3', linewidth=2) ## change color and linewidth of the caps for cap in bp['caps']: cap.set(color='#7570b3', linewidth=2) ## change color and linewidth of the medians for median in bp['medians']: median.set(color='#FFE680', linewidth=2) ## change the style of fliers and their fill for flier in bp['fliers']: flier.set(marker='o', color='#e7298a', alpha=0.5) ## Custom x-axis labels ax.set_xticklabels(xticklabels) ax.set_ylabel(ylabel) plt.show() fig.savefig(filename, bbox_inches='tight') var_results = pd.read_csv("rolling_cv_wind_raw_var.csv") evolving_results = pd.read_csv("rolling_cv_wind_raw_emvfts.csv") mlp_results = pd.read_csv("rolling_cv_wind_raw_mlp_multi.csv") granular_results = pd.read_csv("rolling_cv_wind_raw_granular.csv") metric = 'RMSE' results_data = [evolving_results[metric],var_results[metric], mlp_results[metric], granular_results[metric]] xticks = ['e-MVFTS','VAR','MLP','FIG-FTS'] ylab = 'RMSE' createBoxplot("e-mvfts_boxplot_rmse_solar", results_data, xticks, ylab) pd.options.display.float_format = '{:.2f}'.format metric = 'RMSE' rmse_df = pd.DataFrame(columns=['e-MVFTS','VAR','MLP','FIG-FTS']) rmse_df["e-MVFTS"] = evolving_results[metric] rmse_df["VAR"] = var_results[metric] rmse_df["MLP"] = mlp_results[metric] rmse_df["FIG-FTS"] = granular_results[metric] rmse_df.std() metric = 'SMAPE' results_data = [evolving_results[metric],var_results[metric], mlp_results[metric], granular_results[metric]] xticks = ['e-MVFTS','VAR','MLP','FIG-FTS'] ylab = 'SMAPE' createBoxplot("e-mvfts_boxplot_smape_solar", results_data, xticks, ylab) metric = 'SMAPE' smape_df = pd.DataFrame(columns=['e-MVFTS','VAR','MLP','FIG-FTS']) smape_df["e-MVFTS"] = evolving_results[metric] smape_df["VAR"] = var_results[metric] smape_df["MLP"] = mlp_results[metric] smape_df["FIG-FTS"] = granular_results[metric] smape_df.std() metric = "RMSE" data = pd.DataFrame(columns=["VAR", "Evolving", "MLP", "Granular"]) data["VAR"] = var_results[metric] data["Evolving"] = evolving_results[metric] data["MLP"] = mlp_results[metric] data["Granular"] = granular_results[metric] ax = data.plot(figsize=(18,6)) ax.set(xlabel='Window', ylabel=metric) fig = ax.get_figure() #fig.savefig(path_images + exp_id + "_prequential.png") x = np.arange(len(data.columns.values)) names = data.columns.values values = data.mean().values plt.figure(figsize=(5,6)) plt.bar(x, values, align='center', alpha=0.5, width=0.9) plt.xticks(x, names) #plt.yticks(np.arange(0, 1.1, 0.1)) plt.ylabel(metric) #plt.savefig(path_images + exp_id + "_bars.png") metric = "SMAPE" data = pd.DataFrame(columns=["VAR", "Evolving", "MLP", "Granular"]) data["VAR"] = var_results[metric] data["Evolving"] = evolving_results[metric] data["MLP"] = mlp_results[metric] data["Granular"] = granular_results[metric] ax = data.plot(figsize=(18,6)) ax.set(xlabel='Window', ylabel=metric) fig = ax.get_figure() #fig.savefig(path_images + exp_id + "_prequential.png") x = 
np.arange(len(data.columns.values)) names = data.columns.values values = data.mean().values plt.figure(figsize=(5,6)) plt.bar(x, values, align='center', alpha=0.5, width=0.9) plt.xticks(x, names) #plt.yticks(np.arange(0, 1.1, 0.1)) plt.ylabel(metric) #plt.savefig(path_images + exp_id + "_bars.png")
_____no_output_____
MIT
notebooks/thesis_experiments/20200924_eMVFTS_Wind_Energy_Raw.ipynb
cseveriano/spatio-temporal-forecasting
Use `Lale` `AIF360` scorers to calculate and mitigate bias for credit risk AutoAI model

This notebook contains the steps and code to demonstrate support of AutoAI experiments in the Watson Machine Learning service. It introduces commands for bias detection and mitigation performed with the `lale.lib.aif360` module.

Some familiarity with Python is helpful. This notebook uses Python 3.8.

Contents

This notebook contains the following parts:

1. [Setup](#setup)
2. [Optimizer definition](#definition)
3. [Experiment Run](#run)
4. [Pipeline bias detection and mitigation](#bias)
5. [Deployment and score](#scoring)
6. [Clean up](#cleanup)
7. [Summary and next steps](#summary)

1. Set up the environment

If you are not familiar with the Watson Machine Learning (WML) Service and AutoAI experiments, please read more about them in the sample notebook: "Use AutoAI and Lale to predict credit risk with `ibm-watson-machine-learning`".

Install and import the `ibm-watson-machine-learning`, `lale`, `aif360` packages and their dependencies.

**Note:** `ibm-watson-machine-learning` documentation can be found here.
!pip install -U ibm-watson-machine-learning | tail -n 1
!pip install -U scikit-learn==0.23.2 | tail -n 1
!pip install -U autoai-libs | tail -n 1
!pip install -U lale | tail -n 1
!pip install -U aif360 | tail -n 1
!pip install -U liac-arff | tail -n 1
!pip install -U cvxpy | tail -n 1
!pip install -U fairlearn | tail -n 1
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
Connection to WML

Authenticate the Watson Machine Learning service on IBM Cloud. You need to provide a Cloud `API key` and `location`.

**Tip**: Your `Cloud API key` can be generated by going to the [**Users** section of the Cloud console](https://cloud.ibm.com/iam/users). From that page, click your name, scroll down to the **API Keys** section, and click **Create an IBM Cloud API key**. Give your key a name and click **Create**, then copy the created key and paste it below. You can also get a service-specific URL by going to the [**Endpoint URLs** section of the Watson Machine Learning docs](https://cloud.ibm.com/apidocs/machine-learning). You can check your instance location in your Watson Machine Learning (WML) Service instance details.

You can use the [IBM Cloud CLI](https://cloud.ibm.com/docs/cli/index.html) to retrieve the instance `location`:

```
ibmcloud login --apikey API_KEY -a https://cloud.ibm.com
ibmcloud resource service-instance WML_INSTANCE_NAME
```

**NOTE:** You can also get a service-specific apikey by going to the [**Service IDs** section of the Cloud Console](https://cloud.ibm.com/iam/serviceids). From that page, click **Create**, then copy the created key and paste it in the following cell.

**Action**: Enter your `api_key` and `location` in the following cell.
api_key = 'PUT_YOUR_KEY_HERE'
location = 'us-south'

wml_credentials = {
    "apikey": api_key,
    "url": 'https://' + location + '.ml.cloud.ibm.com'
}

from ibm_watson_machine_learning import APIClient

client = APIClient(wml_credentials)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
Working with spaces

You need to create a space that will be used for your work. If you do not have a space, you can use the [Deployment Spaces Dashboard](https://dataplatform.cloud.ibm.com/ml-runtime/spaces?context=cpdaas) to create one.

- Click **New Deployment Space**
- Create an empty space
- Select Cloud Object Storage
- Select a Watson Machine Learning instance and press **Create**
- Copy `space_id` and paste it below

**Tip**: You can also use the SDK to prepare the space for your work. More information can be found [here](https://github.com/IBM/watson-machine-learning-samples/blob/master/cloud/notebooks/python_sdk/instance-management/Space%20management.ipynb).

**Action**: assign space ID below
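A minimal sketch of creating the space programmatically instead of via the dashboard (the metadata names below are an assumption to verify against your SDK version; on IBM Cloud you may also need to supply storage and compute details):

```
# Sketch only: create a deployment space with the SDK and grab its id.
# Additional STORAGE / COMPUTE metadata may be required on IBM Cloud.
space_meta = {
    client.spaces.ConfigurationMetaNames.NAME: "autoai_bias_demo_space",
    client.spaces.ConfigurationMetaNames.DESCRIPTION: "Space for the AutoAI bias notebook"
}
space_details = client.spaces.store(meta_props=space_meta)
space_id = client.spaces.get_id(space_details)
```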
space_id = 'PASTE YOUR SPACE ID HERE'

client.spaces.list(limit=10)

client.set.default_space(space_id)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
Connections to COS

In the next cell we read the COS credentials from the space.
cos_credentials = client.spaces.get_details(space_id=space_id)['entity']['storage']['properties']
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
2. Optimizer definition

Training data connection

Define connection information to the COS bucket and the training data CSV file. This example uses the [German Credit Risk dataset](https://raw.githubusercontent.com/IBM/watson-machine-learning-samples/master/cloud/data/credit_risk/credit_risk_training_light.csv).

The code in the next cell uploads the training data to the bucket.
filename = 'german_credit_data_biased_training.csv'
datasource_name = 'bluemixcloudobjectstorage'
bucketname = cos_credentials['bucket_name']
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
Download the training data from the git repository and split it into training and test sets.
import os, wget
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split

url = 'https://raw.githubusercontent.com/IBM/watson-machine-learning-samples/master/cloud/data/credit_risk/german_credit_data_biased_training.csv'

if not os.path.isfile(filename):
    wget.download(url)

credit_risk_df = pd.read_csv(filename)

X = credit_risk_df.drop(['Risk'], axis=1)
y = credit_risk_df['Risk']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)

credit_risk_df.head()
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
Create connection
conn_meta_props = {
    client.connections.ConfigurationMetaNames.NAME: f"Connection to Database - {datasource_name} ",
    client.connections.ConfigurationMetaNames.DATASOURCE_TYPE: client.connections.get_datasource_type_uid_by_name(datasource_name),
    client.connections.ConfigurationMetaNames.DESCRIPTION: "Connection to external Database",
    client.connections.ConfigurationMetaNames.PROPERTIES: {
        'bucket': bucketname,
        'access_key': cos_credentials['credentials']['editor']['access_key_id'],
        'secret_key': cos_credentials['credentials']['editor']['secret_access_key'],
        'iam_url': 'https://iam.cloud.ibm.com/identity/token',
        'url': cos_credentials['endpoint_url']
    }
}

conn_details = client.connections.create(meta_props=conn_meta_props)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
**Note**: The above connection can alternatively be initialized with `api_key` and `resource_instance_id`. The above cell can be replaced with:

```
conn_meta_props = {
    client.connections.ConfigurationMetaNames.NAME: f"Connection to Database - {db_name} ",
    client.connections.ConfigurationMetaNames.DATASOURCE_TYPE: client.connections.get_datasource_type_uid_by_name(db_name),
    client.connections.ConfigurationMetaNames.DESCRIPTION: "Connection to external Database",
    client.connections.ConfigurationMetaNames.PROPERTIES: {
        'bucket': bucket_name,
        'api_key': cos_credentials['apikey'],
        'resource_instance_id': cos_credentials['resource_instance_id'],
        'iam_url': 'https://iam.cloud.ibm.com/identity/token',
        'url': 'https://s3.us.cloud-object-storage.appdomain.cloud'
    }
}

conn_details = client.connections.create(meta_props=conn_meta_props)
```
connection_id = client.connections.get_uid(conn_details)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
Define the connection information to the training data and upload the training dataset to the COS bucket.
from ibm_watson_machine_learning.helpers import DataConnection, S3Location

credit_risk_conn = DataConnection(
    connection_asset_id=connection_id,
    location=S3Location(bucket=bucketname, path=filename))

credit_risk_conn._wml_client = client
training_data_reference = [credit_risk_conn]

credit_risk_conn.write(data=X_train.join(y_train), remote_name=filename)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
Optimizer configuration

Provide the input information for the AutoAI optimizer:

- `name` - experiment name
- `prediction_type` - type of the problem
- `prediction_column` - target column name
- `scoring` - optimization metric
- `include_only_estimators` - estimators which will be included during AutoAI training. More available estimators can be found in the `experiment.ClassificationAlgorithms` enum
from ibm_watson_machine_learning.experiment import AutoAI

experiment = AutoAI(wml_credentials, space_id=space_id)

pipeline_optimizer = experiment.optimizer(
    name='Credit Risk Bias detection in AutoAI',
    prediction_type=AutoAI.PredictionType.BINARY,
    prediction_column='Risk',
    scoring=AutoAI.Metrics.ROC_AUC_SCORE,
    include_only_estimators=[experiment.ClassificationAlgorithms.XGB]
)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
3. Experiment run

Call the `fit()` method to trigger the AutoAI experiment. You can either use interactive mode (synchronous job) or background mode (asynchronous job) by specifying `background_mode=True`.
run_details = pipeline_optimizer.fit(
    training_data_reference=training_data_reference,
    background_mode=False)

pipeline_optimizer.get_run_status()

summary = pipeline_optimizer.summary()
summary
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
Get selected pipeline model

Download the pipeline model object from the AutoAI training job.
best_pipeline = pipeline_optimizer.get_pipeline()
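By default `get_pipeline()` returns the best pipeline as a Lale object. If you want a specific leaderboard pipeline or a plain scikit-learn object, recent `ibm-watson-machine-learning` releases also accept the arguments sketched below (treat the exact names as an assumption and check your installed version):

```
# Assumed get_pipeline variants; verify against your ibm-watson-machine-learning version.
pipeline_4 = pipeline_optimizer.get_pipeline(pipeline_name='Pipeline_4')
sklearn_best = pipeline_optimizer.get_pipeline(astype=AutoAI.PipelineTypes.SKLEARN)
```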
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
4. Bias detection and mitigation

The `fairness_info` dictionary contains some fairness-related metadata. The favorable and unfavorable labels are values of the target class column that indicate whether the loan was granted or denied. A protected attribute is a feature that partitions the population into groups whose outcome should have parity. The credit-risk dataset has two protected attribute columns, sex and age. Each protected attribute has a privileged and an unprivileged group.

Note that to use fairness metrics from Lale with numpy arrays, `protected_attributes.feature` needs to be passed as the index of the column in the dataset, not as its name.
fairness_info = {
    'favorable_labels': ['No Risk'],
    'protected_attributes': [
        {'feature': X.columns.get_loc('Sex'), 'reference_group': ['male']},
        {'feature': X.columns.get_loc('Age'), 'reference_group': [[26, 40]]}
    ]}

fairness_info
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
Calculate fairness metrics

We will calculate some model metrics. Accuracy describes how accurate the model is on the dataset. Disparate impact is defined by comparing outcomes between a privileged group and an unprivileged group, so it needs to check the protected attributes to determine group membership for the sample record at hand. The third calculated metric takes the disparate impact into account along with accuracy. The best value of the combined score is 1.0.
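For intuition, disparate impact is the ratio of favorable-outcome rates between the unprivileged and the privileged group; a hand computation with invented numbers looks like this:

```
# Hypothetical rates, for illustration only:
# disparate impact = P(favorable | unprivileged) / P(favorable | privileged)
rate_unprivileged = 0.55   # e.g. share of 'No Risk' predictions outside the reference group
rate_privileged = 0.80     # e.g. share of 'No Risk' predictions in the reference group
print(round(rate_unprivileged / rate_privileged, 2))  # 0.69 -- values near 1.0 indicate parity
```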
import sklearn.metrics
from lale.lib.aif360 import disparate_impact, accuracy_and_disparate_impact

accuracy_scorer = sklearn.metrics.make_scorer(sklearn.metrics.accuracy_score)
print(f'accuracy {accuracy_scorer(best_pipeline, X_test.values, y_test.values):.1%}')

disparate_impact_scorer = disparate_impact(**fairness_info)
print(f'disparate impact {disparate_impact_scorer(best_pipeline, X_test.values, y_test.values):.2f}')

combined_scorer = accuracy_and_disparate_impact(**fairness_info)
print(f'accuracy and disparate impact metric {combined_scorer(best_pipeline, X_test.values, y_test.values):.2f}')
accuracy 82.4%
disparate impact 0.68
accuracy and disparate impact metric 0.26
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
Mitigation

`Hyperopt` minimizes (`best_score` - score_returned_by_the_scorer), where `best_score` is an argument to `Hyperopt` and score_returned_by_the_scorer is the value returned by the scorer for each evaluation point. We will use `Hyperopt` to tune the hyperparameters of the AutoAI pipeline to get a new and fairer model.
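Concretely, with `best_score=1.0` a candidate whose combined scorer returns about 0.73 is reported by `Hyperopt` as a loss of roughly 0.27, which is what the `best loss` in the output below reflects (the scorer value here is illustrative):

```
# Illustration of the loss Hyperopt minimizes.
best_score = 1.0
score_returned_by_the_scorer = 0.73   # hypothetical combined accuracy / disparate-impact value
print(best_score - score_returned_by_the_scorer)  # 0.27 -- lower loss means a better combined score
```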
from sklearn.linear_model import LogisticRegression as LR
from sklearn.tree import DecisionTreeClassifier as Tree
from sklearn.neighbors import KNeighborsClassifier as KNN
from lale.lib.lale import Hyperopt
from lale.lib.aif360 import FairStratifiedKFold
from lale import wrap_imported_operators

wrap_imported_operators()

prefix = best_pipeline.remove_last().freeze_trainable()
prefix.visualize()

new_pipeline = prefix >> (LR | Tree | KNN)
new_pipeline.visualize()

fair_cv = FairStratifiedKFold(**fairness_info, n_splits=3)

pipeline_fairer = new_pipeline.auto_configure(
    X_train.values, y_train.values, optimizer=Hyperopt, cv=fair_cv,
    max_evals=10, scoring=combined_scorer, best_score=1.0)
100%|██████████| 10/10 [01:13<00:00, 7.35s/trial, best loss: 0.27222222222222214]
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
As with any trained model, we can evaluate and visualize the result.
print(f'accuracy {accuracy_scorer(pipeline_fairer, X_test.values, y_test.values):.1%}')
print(f'disparate impact {disparate_impact_scorer(pipeline_fairer, X_test.values, y_test.values):.2f}')
print(f'accuracy and disparate impact metric {combined_scorer(pipeline_fairer, X_test.values, y_test.values):.2f}')

pipeline_fairer.visualize()
accuracy 75.8%
disparate impact 0.86
accuracy and disparate impact metric 0.63
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
As the result demonstrates, the best model found by AI Automation has lower accuracy and a much better disparate impact than the one we saw before. Also, it has tuned the repair level and has picked and tuned a classifier. These results may vary by dataset and search space.

You can get the source code of the created pipeline. You just need to change the type of the cell below from `Raw NBConvert` to `code`.
pipeline_fairer.pretty_print(ipython_display=True, show_imports=False)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
5. Deploy and Score

In this section you will learn how to deploy and score the Lale pipeline model using a WML instance.

Custom software_specification

The created model is an AutoAI model refined with Lale. We will create a new software specification based on the default Python 3.7 environment, extended by the `autoai-libs` package.
base_sw_spec_uid = client.software_specifications.get_uid_by_name("default_py3.7")
print("Id of default Python 3.7 software specification is: ", base_sw_spec_uid)

url = 'https://raw.githubusercontent.com/IBM/watson-machine-learning-samples/master/cloud/configs/config.yaml'

if not os.path.isfile('config.yaml'):
    wget.download(url)

!cat config.yaml
name: python37 channels: - defaults dependencies: - pip: - autoai-libs prefix: /opt/anaconda3/envs/python37
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
The `config.yaml` file describes the details of the package extension. Now you need to store the new package extension with `APIClient`.
meta_prop_pkg_extn = { client.package_extensions.ConfigurationMetaNames.NAME: "Scikt with autoai-libs", client.package_extensions.ConfigurationMetaNames.DESCRIPTION: "Pkg extension for autoai-libs", client.package_extensions.ConfigurationMetaNames.TYPE: "conda_yml" } pkg_extn_details = client.package_extensions.store(meta_props=meta_prop_pkg_extn, file_path="config.yaml") pkg_extn_uid = client.package_extensions.get_uid(pkg_extn_details) pkg_extn_url = client.package_extensions.get_href(pkg_extn_details)
Creating package extensions SUCCESS
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
Create a new software specification and add the created package extension to it.
meta_prop_sw_spec = { client.software_specifications.ConfigurationMetaNames.NAME: "Mitigated AutoAI bases on scikit spec", client.software_specifications.ConfigurationMetaNames.DESCRIPTION: "Software specification for scikt with autoai-libs", client.software_specifications.ConfigurationMetaNames.BASE_SOFTWARE_SPECIFICATION: {"guid": base_sw_spec_uid} } sw_spec_details = client.software_specifications.store(meta_props=meta_prop_sw_spec) sw_spec_uid = client.software_specifications.get_uid(sw_spec_details) status = client.software_specifications.add_package_extension(sw_spec_uid, pkg_extn_uid)
SUCCESS
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
You can get the details of the created software specification using `client.software_specifications.get_details(sw_spec_uid)`. Store the model
model_props = { client.repository.ModelMetaNames.NAME: "Fairer AutoAI model", client.repository.ModelMetaNames.TYPE: 'scikit-learn_0.23', client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: sw_spec_uid } feature_vector = list(X.columns) published_model = client.repository.store_model( model=best_pipeline.export_to_sklearn_pipeline(), meta_props=model_props, training_data=X_train.values, training_target=y_train.values, feature_names=feature_vector, label_column_names=['Risk'] ) published_model_uid = client.repository.get_model_id(published_model)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
Deployment creation
metadata = { client.deployments.ConfigurationMetaNames.NAME: "Deployment of fairer model", client.deployments.ConfigurationMetaNames.ONLINE: {} } created_deployment = client.deployments.create(published_model_uid, meta_props=metadata) deployment_id = client.deployments.get_uid(created_deployment)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
Deployment scoring You need to pass scoring values as input data to the deployed model. Use the `client.deployments.score()` method to get predictions from the deployed model.
values = X_test.values scoring_payload = { "input_data": [{ 'values': values[:5] }] } predictions = client.deployments.score(deployment_id, scoring_payload) predictions
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/experiments/autoai/Use Lale AIF360 scorers to calculate and mitigate bias for credit risk AutoAI model.ipynb
muthukumarbala07/watson-machine-learning-samples
Trade-off between classification accuracy and reconstruction error during dimensionality reduction - Low-dimensional LSTM representations are excellent at dimensionality reduction, but are poor at reconstructing the original data. - On the other hand, PCs are excellent at reconstructing the original data, but these high-variance components do not preserve class information.
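As a toy illustration of the second point above (high-variance principal components need not carry class information), here is a minimal sketch that is not part of the original analysis; the synthetic dataset, variable names, and scales are invented purely for illustration.

```python
# Toy sketch: a 2-class dataset whose discriminative direction has low variance,
# so the first principal component (chosen purely by variance) ignores it.
import numpy as np
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
n = 500
labels = rng.integers(0, 2, size=n)
nuisance = rng.normal(scale=10.0, size=n)          # high variance, class-independent
signal = labels + rng.normal(scale=0.2, size=n)    # low variance, class-dependent
X = np.column_stack([nuisance, signal])

pca = PCA(n_components=1).fit(X)
X_1d = pca.transform(X)
print("variance explained by PC1:", pca.explained_variance_ratio_[0])
print("accuracy on PC1:",
      LogisticRegression(max_iter=1000).fit(X_1d, labels).score(X_1d, labels))
print("accuracy on both features:",
      LogisticRegression(max_iter=1000).fit(X, labels).score(X, labels))
```

PC1 captures almost all of the variance yet classifies near chance, while the full two-feature input classifies almost perfectly; this is the same trade-off examined for the clip data below.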
import numpy as np import pandas as pd import scipy as sp import pickle import os import random import sys # visualizations from _plotly_future_ import v4_subplots import plotly.offline as py py.init_notebook_mode(connected=True) import plotly.graph_objs as go import plotly.subplots as tls import plotly.figure_factory as ff import plotly.io as pio import plotly.express as px pio.templates.default = 'plotly_white' pio.orca.config.executable = '/home/joyneelm/fire/bin/orca' colors = px.colors.qualitative.Plotly class ARGS(): roi = 300 net = 7 subnet = 'wb' train_size = 100 batch_size = 32 num_epochs = 50 zscore = 1 #gru k_hidden = 32 k_layers = 1 dims = [3, 4, 5, 10] args = ARGS() def _get_results(k_dim): RES_DIR = 'results/clip_gru_recon' load_path = (RES_DIR + '/roi_%d_net_%d' %(args.roi, args.net) + '_trainsize_%d' %(args.train_size) + '_k_hidden_%d' %(args.k_hidden) + '_kdim_%d' %(k_dim) + '_k_layers_%d' %(args.k_layers) + '_batch_size_%d' %(args.batch_size) + '_num_epochs_45' + '_z_%d.pkl' %(args.zscore)) with open(load_path, 'rb') as f: results = pickle.load(f) # print(results.keys()) return results r = {} for k_dim in args.dims: r[k_dim] = _get_results(k_dim) def _plot_fig(ss): title_text = ss if ss=='var': ss = 'mse' invert = True else: invert = False subplot_titles = ['train', 'test'] fig = tls.make_subplots(rows=1, cols=2, subplot_titles=subplot_titles, print_grid=False) for ii, x in enumerate(['train', 'test']): gru_score = {'mean':[], 'ste':[]} pca_score = {'mean':[], 'ste':[]} for k_dim in args.dims: a = r[k_dim] # gru decoder y = np.mean(a['%s_%s'%(x, ss)]) gru_score['mean'].append(y) # pca decoder y = np.mean(a['%s_pca_%s'%(x, ss)]) pca_score['mean'].append(y) x = np.arange(len(args.dims)) if invert: y = 1 - np.array(gru_score['mean']) else: y = gru_score['mean'] error_y = gru_score['ste'] trace = go.Bar(x=x, y=y, name='lstm decoder', marker_color=colors[0]) fig.add_trace(trace, 1, ii+1) if invert: y = 1 - np.array(pca_score['mean']) else: y = pca_score['mean'] error_y = pca_score['ste'] trace = go.Bar(x=x, y=y, name='pca recon', marker_color=colors[1]) fig.add_trace(trace, 1, ii+1) fig.update_xaxes(tickvals=np.arange(len(args.dims)), ticktext=args.dims) fig.update_layout(height=350, width=700, title_text=title_text) return fig
_____no_output_____
MIT
clip_gru_recon.ipynb
LCE-UMD/GRU
Mean-squared error vs number of dimensions
''' mse ''' ss = 'mse' fig = _plot_fig(ss) fig.show()
_____no_output_____
MIT
clip_gru_recon.ipynb
LCE-UMD/GRU
Variance captured vs number of dimensions
''' variance ''' ss = 'var' fig = _plot_fig(ss) fig.show()
_____no_output_____
MIT
clip_gru_recon.ipynb
LCE-UMD/GRU
R-squared vs number of dimensions
''' r2 ''' ss = 'r2' fig = _plot_fig(ss) fig.show() results = r[10] # variance not captured by pca recon pca_not = 1 - np.sum(results['pca_var']) print('percent variance captured by pca components = %0.3f' %(1 - pca_not)) # this is proportional to pca mse pca_mse = results['test_pca_mse'] # variance not captured by lstm decoder? lstm_mse = results['test_mse'] lstm_not = lstm_mse*(pca_not/pca_mse) print('percent variance captured by lstm recon = %0.3f' %(1 - lstm_not)) def _plot_fig_ext(ss): title_text = ss if ss=='var': ss = 'mse' invert = True else: invert = False subplot_titles = ['train', 'test'] fig = go.Figure() x = 'test' lstm_score = {'mean':[], 'ste':[]} pca_score = {'mean':[], 'ste':[]} lstm_acc = {'mean':[], 'ste':[]} pc_acc = {'mean':[], 'ste':[]} for k_dim in args.dims: a = r[k_dim] # lstm encoder k_sub = len(a['test']) y = np.mean(a['test']) error_y = 3/np.sqrt(k_sub)*np.std(a['test']) lstm_acc['mean'].append(y) lstm_acc['ste'].append(error_y) # lstm decoder y = np.mean(a['%s_%s'%(x, ss)]) lstm_score['mean'].append(y) lstm_score['ste'].append(error_y) # pca encoder b = r_pc[k_dim] y = np.mean(b['test']) error_y = 3/np.sqrt(k_sub)*np.std(b['test']) pc_acc['mean'].append(y) pc_acc['ste'].append(error_y) # pca decoder y = np.mean(a['%s_pca_%s'%(x, ss)]) pca_score['mean'].append(y) pca_score['ste'].append(error_y) x = np.arange(len(args.dims)) y = lstm_acc['mean'] error_y = lstm_acc['ste'] trace = go.Bar(x=x, y=y, name='GRU Accuracy', error_y=dict(type='data', array=error_y), marker_color=colors[3]) fig.add_trace(trace) y = pc_acc['mean'] error_y = pc_acc['ste'] trace = go.Bar(x=x, y=y, name='PCA Accuracy', error_y=dict(type='data', array=error_y), marker_color=colors[4]) fig.add_trace(trace) if invert: y = 1 - np.array(lstm_score['mean']) else: y = lstm_score['mean'] error_y = lstm_score['ste'] trace = go.Bar(x=x, y=y, name='GRU Reconstruction', error_y=dict(type='data', array=error_y), marker_color=colors[5]) fig.add_trace(trace) if invert: y = 1 - np.array(pca_score['mean']) else: y = pca_score['mean'] error_y = pca_score['ste'] trace = go.Bar(x=x, y=y, name='PCA Reconstruction', error_y=dict(type='data', array=error_y), marker_color=colors[2]) fig.add_trace(trace) fig.update_yaxes(title=dict(text='Accuracy or % variance', font_size=20), gridwidth=1, gridcolor='#bfbfbf', tickfont=dict(size=20)) fig.update_xaxes(title=dict(text='Number of dimensions', font_size=20), tickvals=np.arange(len(args.dims)), ticktext=args.dims, tickfont=dict(size=20)) fig.update_layout(height=470, width=570, font_color='black', legend_orientation='h', legend_font_size=20, legend_x=-0.1, legend_y=-0.3) return fig def _get_pc_results(PC_DIR, k_dim): load_path = (PC_DIR + '/roi_%d_net_%d' %(args.roi, args.net) + '_nw_%s' %(args.subnet) + '_trainsize_%d' %(args.train_size) + '_kdim_%d_batch_size_%d' %(k_dim, args.batch_size) + '_num_epochs_%d_z_%d.pkl' %(args.num_epochs, args.zscore)) with open(load_path, 'rb') as f: results = pickle.load(f) print(results.keys()) return results
_____no_output_____
MIT
clip_gru_recon.ipynb
LCE-UMD/GRU
Comparison of LSTM and PCA: classification accuracy and variance captured
''' variance ''' r_pc = {} PC_DIR = 'results/clip_pca' for k_dim in args.dims: r_pc[k_dim] = _get_pc_results(PC_DIR, k_dim) colors = px.colors.qualitative.Set3 #colors = ["#D55E00", "#009E73", "#56B4E9", "#E69F00"] ss = 'var' fig = _plot_fig_ext(ss) fig.show() fig.write_image('figures/fig3c.png')
dict_keys(['train', 'val', 't_train', 't_test', 'test']) dict_keys(['train', 'val', 't_train', 't_test', 'test']) dict_keys(['train', 'val', 't_train', 't_test', 'test']) dict_keys(['train', 'val', 't_train', 't_test', 'test'])
MIT
clip_gru_recon.ipynb
LCE-UMD/GRU
State $$x = [w,n,m,s,e,o]$$ $w$: wealth level, size: 20 $n$: 401k level, size: 10 $m$: mortgage level, size: 10 $s$: economic state, size: 8 $e$: employment state, size: 2 $o$: housing state, size: 2 Action $c$: consumption amount, size: 20 $b$: bond investment, size: 20 $k$: stock investment, derived from the budget constraint once $c$ and $b$ are determined. $h$: housing consumption size, related to housing status and consumption level. If $O = 1$, the agent owns a house: $A = [c, b, k, h=H, action = 1]$ sell the house $A = [c, b, k, h=H, action = 0]$ keep the house If $O = 0$, the agent does not own a house: $A = [c, b, k, h= \frac{c}{\alpha} \frac{1-\alpha}{pr}, action = 0]$ keep renting the house $A = [c, b, k, h= \frac{c}{\alpha} \frac{1-\alpha}{pr}, action = 1]$ buy a house with $H$ units Housing 20% down payment on the mortgage, fixed mortgage rate, a single housing unit available; between ages 20 and 50, agents can choose to buy a house and can choose to sell the house at any moment. $H = 1000$
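To make the discretization above concrete, here is a minimal, hedged sketch in plain NumPy (not the notebook's JAX code) of how the state grid and the budget-constrained action set could be enumerated; the grid sizes follow the text, while the value ranges, `alpha`, and `pr` below are placeholder assumptions.

```python
# Illustrative enumeration of the discretized state and action spaces.
import numpy as np
from itertools import product

n_w, n_n, n_m, n_s, n_e, n_o = 20, 10, 10, 8, 2, 2
ws = np.linspace(0.0, 2000.0, n_w)      # wealth grid (range assumed)
ns = np.linspace(0.0, 1000.0, n_n)      # 401k grid (range assumed)
ms = np.linspace(0.0, 1000.0, n_m)      # mortgage grid (range assumed)
states = np.array(list(product(ws, ns, ms, range(n_s), range(n_e), range(n_o))))
print(states.shape)                     # (20*10*10*8*2*2, 6) = (64000, 6)

H, alpha, pr = 1000, 0.7, 0.05          # alpha and pr are assumptions

def candidate_actions(budget, owns_house):
    """Enumerate (c, b, k, h, action) tuples; k is the residual of the budget."""
    actions = []
    for c_frac in np.linspace(0.05, 0.95, 20):       # consumption grid, size 20
        c = c_frac * budget
        for b_frac in np.linspace(0.0, 1.0, 20):     # bond grid, size 20
            b = b_frac * (budget - c)
            k = budget - c - b                       # stock from the budget constraint
            h = H if owns_house else c * (1 - alpha) / (alpha * pr)
            actions.append((c, b, k, h, 0))          # action flag 0 = keep current status
    return np.array(actions)

print(candidate_actions(100.0, owns_house=False).shape)   # (400, 5)
```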
%%time for t in tqdm(range(T_max-1,T_min-1, -1)): if t == T_max-1: v,cbkha = vmap(partial(V,t,Vgrid[:,:,:,:,:,:,t]))(Xs) else: v,cbkha = vmap(partial(V,t,Vgrid[:,:,:,:,:,:,t+1]))(Xs) Vgrid[:,:,:,:,:,:,t] = v.reshape(dim) cgrid[:,:,:,:,:,:,t] = cbkha[:,0].reshape(dim) bgrid[:,:,:,:,:,:,t] = cbkha[:,1].reshape(dim) kgrid[:,:,:,:,:,:,t] = cbkha[:,2].reshape(dim) hgrid[:,:,:,:,:,:,t] = cbkha[:,3].reshape(dim) agrid[:,:,:,:,:,:,t] = cbkha[:,4].reshape(dim) np.save("Value00",Vgrid)
_____no_output_____
MIT
20210519/housing_force00.ipynb
dongxulee/lifeCycle
Quadrature rules for 2.5-D resistivity modellingWe consider the evaluation of the integral$$\Phi(x, y, z) = \frac{2}{\pi} \int_0^\infty \tilde\Phi(k, y, z) \cos(k x)\, dk$$where $$\tilde\Phi(k, y, z) = K_0\left({k}{\sqrt{y^2 + z^2}}\right).$$The function $\tilde\Phi$ exhibits a different asymptotic behaviour depending on the magnitude of the argument, i.e., with $u := kr$$$u\to 0: K_0(u) \to -\ln(u)$$and$$u \to \infty: K_0(u) \to \frac{e^{-u}}{\sqrt{u}}.$$For a fixed distance $r = \sqrt{y^2 + z^2} = 1$ and $10^{-6} \le k \le 10^1$, we obtain the following figure:
k = logspace(-6, 4, 101); kk = 1e-3; u = besselk(0, k * kk); padln = 65; padexp = 15; loglog(k, u, 'k', k(1:padln), -log(kk * k(1:padln)), 'r.', ... k(end-padexp:end), exp(-kk * k(end-padexp:end))./sqrt(kk * k(end-padexp:end)), 'b.') legend('K_0(u)', '-ln(u)', 'exp(-u)/sqrt(u)') ylabel('\Phi(u)') xlabel('u')
_____no_output_____
MIT
notebooks/Quadrature.ipynb
ruboerner/notebooks
We split the integration at $k = k_0$, $0 < k_0 < \infty$.We obtain$$\int_0^\infty \tilde\Phi(k)\,dk = \int_0^{k_0}\tilde\Phi(k)\,dk + \int_{k_0}^\infty\tilde\Phi(k)\,dk.$$ Gauss-Legendre quadratureTo avoid the singularity at $k \to 0$ for the first integral, we substitute $k'=\sqrt{k / k_0}$ and obtain with $dk = 2 k_0 k' dk'$$$\int_0^{k_0}\tilde\Phi(k)\,dk = \int_0^1 g(k')\,dk' \approx \sum_{n=1}^N w_n' g(k_n') = \sum_{n=1}^N w_n \tilde\Phi(k_n)$$with $w_n = 2 k_0 k_n' w_n' $ and $k_n = k_0 k_n'^2$. Gauss-Laguerre quadratureFor the second integral, we substitute $k' = k / k_0 - 1$, define $g(k') = k_0 \tilde\Phi(k)e^{k'}$, and obtain$$\int_{k_0}^\infty\tilde\Phi(k)\,dk = \int_0^\infty e^{-k'} g(k')\,dk' \approx \sum_{n=1}^N w_n' g(k_n') = \sum_{n=1}^N w_n \tilde\Phi(k_n)$$with $w_n = k_0 e^{k_n'}w_n'$ and $k_n = k_0 (k_n'+1)$. Choice of $k_0$The actual value of $k_0$ depends on the smallest electrode spacing $r_{min}$.More precisely, $k_0 = (2 r_{min})^{-1}$. Numerical testIn the case of a point electrode with current $I$ located at $\mathbf r' = (x', y', 0)^\top$ at the surface of a homogeneous halfspace with resistivity $\rho$, we obtain for the electric potential at point $\mathbf r = (x, y, z)^\top$$$\Phi(\mathbf r) = \dfrac{\rho I}{2 \pi |\mathbf r - \mathbf r'|}.$$We try to approximate the inverse Cosine transform$$\Phi(x, y, z) = \frac{2}{\pi} \int_0^\infty \tilde\Phi(k, y, z) \cos(k x)\, dk$$for the special case of $x = 0$ ($\cos(0) = 1$) by means of the Gauss quadrature rules introduced above.For the smallest electrode spacing of, e.g., $|\mathbf r - \mathbf r'| = r_{min} = 1$ we would set $k_0 = 0.5$.
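The MATLAB cell below builds these nodes and weights with custom `gauleg`/`gaulag` helpers. For reference, here is a hedged NumPy/SciPy sketch of the same construction (not part of the original notebook); it maps the standard Gauss-Legendre rule from $[-1,1]$ to $[0,1]$ and uses the standard Gauss-Laguerre rule, exactly as in the formulas above.

```python
import numpy as np
from scipy.special import kv   # modified Bessel function of the second kind

rmin = 1.0
k0 = 1.0 / (2.0 * rmin)

# Gauss-Legendre rule mapped from [-1, 1] to [0, 1] for the integral over [0, k0]:
# k_n = k0 * x_n^2, w_n = 2 * k0 * x_n * w_n'
x1, w1 = np.polynomial.legendre.leggauss(17)
x1 = 0.5 * (x1 + 1.0)
w1 = 0.5 * w1
kn1 = k0 * x1**2
wn1 = 2.0 * k0 * x1 * w1

# Gauss-Laguerre rule for the tail over [k0, inf):
# k_n = k0 * (x_n + 1), w_n = k0 * exp(x_n) * w_n'
x2, w2 = np.polynomial.laguerre.laggauss(7)
kn2 = k0 * (x2 + 1.0)
wn2 = k0 * np.exp(x2) * w2

k = np.concatenate([kn1, kn2])
w = np.concatenate([wn1, wn2])

# Check against the halfspace solution Phi_a(r) = 1/r (rho = 2*pi, I = 1);
# the maximum relative error over these spacings should be small.
r = np.arange(1.0, 101.0)
phi = (2.0 / np.pi) * np.array([np.sum(w * kv(0, k * ri)) for ri in r])
print(np.max(np.abs(1.0 - phi * r)))
```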
rmin = 1; rp = rmin:1:100; rp = rp(:); k0 = 1 / (2 * rmin); [x1, w1] = gauleg(0, 1, 17); [x2, w2] = gaulag(7); kn1 = k0 * x1 .* x1; wn1 = 2 * k0 * x1 .* w1; kn2 = k0 * (x2 + 1); wn2 = k0 * exp(x2) .* w2; k = [kn1(:); kn2(:)]; w = [wn1(:); wn2(:)];
_____no_output_____
MIT
notebooks/Quadrature.ipynb
ruboerner/notebooks
We check the validity of the approximation by comparing it against the analytical solution for the homogeneous halfspace, which, in the case of $\rho = 2 \pi$ and $I = 1$, is simply $$\Phi_a(r) = \dfrac{1}{r}.$$
k(1) v = zeros(length(rp), 1); for i = 1:length(rp) v(i) = 2 / pi * sum(w .* besselk(0, k * rp(i))); end plot(rp, v, 'r.-', rp, 1 ./ rp, 'b') xlabel('r in m') ylabel('potential in V') legend('transformed', 'analytical')
_____no_output_____
MIT
notebooks/Quadrature.ipynb
ruboerner/notebooks
In the following plot, we display the relative error of the approximation$$e(r) := \left(1 - \dfrac{\Phi(r)}{\Phi_a(r)}\right) \cdot 100 \%$$with respect to the (normalized) electrode distance.
plot(rp / rmin, 100 * (1 - v .* rp), '.-'); grid(); xlabel('r / r_{min}'); ylabel('rel. error in %'); ylim([-0.05 0.05])
_____no_output_____
MIT
notebooks/Quadrature.ipynb
ruboerner/notebooks
Introduction to Gym toolkit Gym Environments The centerpiece of Gym is the environment, which defines the "game" in which your reinforcement algorithm will compete. An environment does not need to be a game; however, it describes the following game-like features: * **action space**: What actions can we take on the environment, at each step/episode, to alter the environment. * **observation space**: What is the current state of the portion of the environment that we can observe. Usually, we can see the entire environment. Before we begin to look at Gym, it is essential to understand some of the terminology used by this library. * **Agent** - The machine learning program or model that controls the actions. * **Step** - One round of issuing actions that affect the observation space. * **Episode** - A collection of steps that terminates when the agent fails to meet the environment's objective, or the episode reaches the maximum number of allowed steps. * **Render** - Gym can render one frame for display after each episode. * **Reward** - A positive reinforcement that can occur at the end of each episode, after the agent acts. * **Nondeterministic** - For some environments, randomness is a factor in deciding what effects actions have on reward and changes to the observation space.
import gym def query_environment(name): env = gym.make(name) spec = gym.spec(name) print(f"Action Space: {env.action_space}") print(f"Observation Space: {env.observation_space}") print(f"Max Episode Steps: {spec.max_episode_steps}") print(f"Nondeterministic: {spec.nondeterministic}") print(f"Reward Range: {env.reward_range}") print(f"Reward Threshold: {spec.reward_threshold}") query_environment("CartPole-v1")
Action Space: Discrete(2) Observation Space: Box(-3.4028234663852886e+38, 3.4028234663852886e+38, (4,), float32) Max Episode Steps: 500 Nondeterministic: False Reward Range: (-inf, inf) Reward Threshold: 475.0
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
The CartPole-v1 environment challenges the agent to move a cart while keeping a pole balanced. The environment has an observation space of 4 continuous numbers: * Cart Position * Cart Velocity * Pole Angle * Pole Velocity At Tip To achieve this goal, the agent can take the following actions: * Push cart to the left * Push cart to the right There is also a continuous variant of the mountain car. This version does not simply have the motor on or off. For the continuous car, the action space is a single floating point number that specifies how much forward or backward force is being applied (a short example of querying this variant follows the next cell). Simple
import random from typing import List class Environment: def __init__(self): self.steps_left = 10 def get_observation(self) -> List[float]: return [0.0, 0.0, 0.0] def get_actions(self) -> List[int]: return [0, 1] def is_done(self) -> bool: return self.steps_left == 0 def action(self, action: int) -> float: if self.is_done(): raise Exception("Game is over") self.steps_left -= 1 return random.random() class Agent: def __init__(self): self.total_reward = 0.0 def step(self, env: Environment): current_obs = env.get_observation() actions = env.get_actions() reward = env.action(random.choice(actions)) self.total_reward += reward if __name__ == "__main__": env = Environment() agent = Agent() while not env.is_done(): agent.step(env) print("Total reward got: %.4f" % agent.total_reward)
Total reward got: 4.6979
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
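As a quick aside on the continuous mountain car mentioned above, here is a hedged example of inspecting its spaces; `MountainCarContinuous-v0` is the standard Gym environment id, and the exact bounds printed may vary with the Gym version.

```python
import gym

# Continuous mountain car: the action is a single float (the applied force),
# not a discrete push-left / push-right choice.
env = gym.make("MountainCarContinuous-v0")
print(env.action_space)       # expected: Box(-1.0, 1.0, (1,), float32)
print(env.observation_space)  # position and velocity of the car
```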
Frozenlake
import gym env = gym.make("FrozenLake-v0") env.render() print(env.observation_space) print(env.action_space)
Discrete(4)
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
| Number | Action || ------ | ------ || 0 | Left || 1 | Down || 2 | Right || 3 | Up | We can obtain the transition probability and the reward function by just typing env.P[state][action]. So, to obtain the transition probability of moving from state S to the other states by performing the action right, we can type env.P[S][right]. But we cannot just type state S and action right directly since they are encoded as numbers. We learned that state S is encoded as 0 and the action right is encoded as 2, so, to obtain the transition probability of state S by performing the action right, we type env.P[0][2]
print(env.P[0][2])
[(0.3333333333333333, 4, 0.0, False), (0.3333333333333333, 1, 0.0, False), (0.3333333333333333, 0, 0.0, False)]
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
Our output is in the form of [(transition probability, next state, reward, Is terminal state?)]
state = env.reset() env.step(1) (next_state, reward, done, info) = env.step(1)
_____no_output_____
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
- **next_state** represents the next state.- **reward** represents the obtained reward.- **done** implies whether our episode has ended. That is, if the next state is a terminal state, then our episode will end, so done will be marked as True else it will be marked as False.- **info** — Apart from the transition probability, in some cases, we also obtain other information saved as info, which is used for debugging purposes.
random_action = env.action_space.sample() next_state, reward, done, info = env.step(random_action)
_____no_output_____
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
**Generating an episode**The episode is the agent environment interaction starting from the initial state to the terminal state. The agent interacts with the environment by performing some action in each state. An episode ends if the agent reaches the terminal state. So, in the Frozen Lake environment, the episode will end if the agent reaches the terminal state, which is either the hole state (H) or goal state (G).
import gym env = gym.make("FrozenLake-v0") state = env.reset() print('Time Step 0 :') env.render() num_timesteps = 20 for t in range(num_timesteps): random_action = env.action_space.sample() new_state, reward, done, info = env.step(random_action) print ('Time Step {} :'.format(t+1)) env.render() if done: break
Time Step 0 : SFFF FHFH FFFH HFFG Time Step 1 : (Right) SFFF FHFH FFFH HFFG Time Step 2 : (Right) SFFF FHFH FFFH HFFG Time Step 3 : (Left) SFFF FHFH FFFH HFFG Time Step 4 : (Right) SFFF FHFH FFFH HFFG Time Step 5 : (Up) SFFF FHFH FFFH HFFG
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
Instead of generating one episode, we can also generate a series of episodes by taking some random action in each state
import gym env = gym.make("FrozenLake-v0") num_episodes = 10 num_timesteps = 20 for i in range(num_episodes): state = env.reset() print('Time Step 0 :') env.render() for t in range(num_timesteps): random_action = env.action_space.sample() new_state, reward, done, info = env.step(random_action) print ('Time Step {} :'.format(t+1)) env.render() if done: break
_____no_output_____
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
Cartpole
env = gym.make("CartPole-v0") print(env.observation_space)
Box(-3.4028234663852886e+38, 3.4028234663852886e+38, (4,), float32)
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
Note that all of these values are continuous, that is: - The value of the cart position ranges from -4.8 to 4.8. - The value of the cart velocity ranges from -Inf to Inf. - The value of the pole angle ranges from -0.418 radians to 0.418 radians. - The value of the pole velocity at the tip ranges from -Inf to Inf.
print(env.reset()) print(env.observation_space.high)
[4.8000002e+00 3.4028235e+38 4.1887903e-01 3.4028235e+38]
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
It implies that:1. The maximum value of the cart position is 4.8.2. We learned that the maximum value of the cart velocity is +Inf, and we know that infinity is not really a number, so it is represented using the largest positive real value 3.4028235e+38.3. The maximum value of the pole angle is 0.418 radians.4. The maximum value of the pole velocity at the tip is +Inf, so it is represented using the largest positive real value 3.4028235e+38.
print(env.observation_space.low)
[-4.8000002e+00 -3.4028235e+38 -4.1887903e-01 -3.4028235e+38]
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
It states that:1. The minimum value of the cart position is -4.8.2. We learned that the minimum value of the cart velocity is -Inf, and we know that infinity is not really a number, so it is represented using the largest negative real value -3.4028235e+38.3. The minimum value of the pole angle is -0.418 radians.4. The minimum value of the pole velocity at the tip is -Inf, so it is represented using the largest negative real value -3.4028235e+38.
print(env.action_space)
Discrete(2)
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
| Number | Action || ------ | ------ || 0 | Push cart to the left || 1 | Push cart to the right |
import gym if __name__ == "__main__": env = gym.make("CartPole-v0") total_reward = 0.0 total_steps = 0 obs = env.reset() while True: action = env.action_space.sample() obs, reward, done, _ = env.step(action) total_reward += reward total_steps += 1 if done: break print("Episode done in %d steps, total reward %.2f" % ( total_steps, total_reward))
Episode done in 20 steps, total reward 20.00
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
WrappersVery frequently, you will want to extend the environment's functionality in some generic way. For example, imagine an environment gives you some observations, but you want to accumulate them in some buffer and provide to the agent the N last observations. This is a common scenario for dynamic computer games, when one single frame is just not enough to get the full information about the game state. Another example is when you want to be able to crop or preprocess an image's pixels to make it more convenient for the agent to digest, or if you want to normalize reward scores somehow. There are many such situations that have the same structure – you want to "wrap" the existing environment and add some extra logic for doing something. Gym provides a convenient framework for these situations – the Wrapper class. **Random action wrapper**
import gym from typing import TypeVar import random Action = TypeVar('Action') class RandomActionWrapper(gym.ActionWrapper): def __init__(self, env, epsilon=0.1): super(RandomActionWrapper, self).__init__(env) self.epsilon = epsilon def action(self, action: Action) -> Action: if random.random() < self.epsilon: print("Random!") return self.env.action_space.sample() return action if __name__ == "__main__": env = RandomActionWrapper(gym.make("CartPole-v0")) obs = env.reset() total_reward = 0.0 while True: obs, reward, done, _ = env.step(0) total_reward += reward if done: break print("Reward got: %.2f" % total_reward)
Reward got: 9.00
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
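Returning to the observation-buffering scenario described at the start of the Wrappers section, here is a minimal sketch (not from the original text) of an `ObservationWrapper` that returns the last N observations stacked together; the class name and the `n_steps` parameter are illustrative assumptions, and the code targets the classic Gym API used throughout this notebook, where `reset()` returns only the observation.

```python
import collections
import numpy as np
import gym

class BufferObservations(gym.ObservationWrapper):
    """Illustrative wrapper that returns the last `n_steps` observations."""
    def __init__(self, env, n_steps=4):
        super(BufferObservations, self).__init__(env)
        self.n_steps = n_steps
        old = env.observation_space
        # repeat the original bounds along a new leading "time" axis
        self.observation_space = gym.spaces.Box(
            low=np.repeat(old.low[np.newaxis], n_steps, axis=0),
            high=np.repeat(old.high[np.newaxis], n_steps, axis=0),
            dtype=old.dtype)
        self.buffer = collections.deque(maxlen=n_steps)

    def reset(self):
        obs = self.env.reset()
        for _ in range(self.n_steps):
            self.buffer.append(obs)      # pad the history with the first frame
        return self.observation(obs)

    def observation(self, observation):
        self.buffer.append(observation)
        return np.array(self.buffer)

env = BufferObservations(gym.make("CartPole-v0"), n_steps=4)
print(env.reset().shape)   # (4, 4): last 4 observations of 4 features each
```

An agent wrapped this way sees a short history of the state instead of a single frame, which is exactly the buffering scenario the text describes.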
Atari GAN
! wget http://www.atarimania.com/roms/Roms.rar ! mkdir /content/ROM/ ! unrar e /content/Roms.rar /content/ROM/ ! python -m atari_py.import_roms /content/ROM/
_____no_output_____
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
Normal
import random import argparse import cv2 import torch import torch.nn as nn import torch.optim as optim from torch.utils.tensorboard import SummaryWriter import torchvision.utils as vutils import gym import gym.spaces import numpy as np log = gym.logger log.set_level(gym.logger.INFO) LATENT_VECTOR_SIZE = 100 DISCR_FILTERS = 64 GENER_FILTERS = 64 BATCH_SIZE = 16 # dimension input image will be rescaled IMAGE_SIZE = 64 LEARNING_RATE = 0.0001 REPORT_EVERY_ITER = 100 SAVE_IMAGE_EVERY_ITER = 1000 class InputWrapper(gym.ObservationWrapper): """ Preprocessing of input numpy array: 1. resize image into predefined size 2. move color channel axis to a first place """ def __init__(self, *args): super(InputWrapper, self).__init__(*args) assert isinstance(self.observation_space, gym.spaces.Box) old_space = self.observation_space self.observation_space = gym.spaces.Box( self.observation(old_space.low), self.observation(old_space.high), dtype=np.float32) def observation(self, observation): # resize image new_obs = cv2.resize( observation, (IMAGE_SIZE, IMAGE_SIZE)) # transform (210, 160, 3) -> (3, 210, 160) new_obs = np.moveaxis(new_obs, 2, 0) return new_obs.astype(np.float32) class Discriminator(nn.Module): def __init__(self, input_shape): super(Discriminator, self).__init__() # this pipe converges image into the single number self.conv_pipe = nn.Sequential( nn.Conv2d(in_channels=input_shape[0], out_channels=DISCR_FILTERS, kernel_size=4, stride=2, padding=1), nn.ReLU(), nn.Conv2d(in_channels=DISCR_FILTERS, out_channels=DISCR_FILTERS*2, kernel_size=4, stride=2, padding=1), nn.BatchNorm2d(DISCR_FILTERS*2), nn.ReLU(), nn.Conv2d(in_channels=DISCR_FILTERS * 2, out_channels=DISCR_FILTERS * 4, kernel_size=4, stride=2, padding=1), nn.BatchNorm2d(DISCR_FILTERS * 4), nn.ReLU(), nn.Conv2d(in_channels=DISCR_FILTERS * 4, out_channels=DISCR_FILTERS * 8, kernel_size=4, stride=2, padding=1), nn.BatchNorm2d(DISCR_FILTERS * 8), nn.ReLU(), nn.Conv2d(in_channels=DISCR_FILTERS * 8, out_channels=1, kernel_size=4, stride=1, padding=0), nn.Sigmoid() ) def forward(self, x): conv_out = self.conv_pipe(x) return conv_out.view(-1, 1).squeeze(dim=1) class Generator(nn.Module): def __init__(self, output_shape): super(Generator, self).__init__() # pipe deconvolves input vector into (3, 64, 64) image self.pipe = nn.Sequential( nn.ConvTranspose2d(in_channels=LATENT_VECTOR_SIZE, out_channels=GENER_FILTERS * 8, kernel_size=4, stride=1, padding=0), nn.BatchNorm2d(GENER_FILTERS * 8), nn.ReLU(), nn.ConvTranspose2d(in_channels=GENER_FILTERS * 8, out_channels=GENER_FILTERS * 4, kernel_size=4, stride=2, padding=1), nn.BatchNorm2d(GENER_FILTERS * 4), nn.ReLU(), nn.ConvTranspose2d(in_channels=GENER_FILTERS * 4, out_channels=GENER_FILTERS * 2, kernel_size=4, stride=2, padding=1), nn.BatchNorm2d(GENER_FILTERS * 2), nn.ReLU(), nn.ConvTranspose2d(in_channels=GENER_FILTERS * 2, out_channels=GENER_FILTERS, kernel_size=4, stride=2, padding=1), nn.BatchNorm2d(GENER_FILTERS), nn.ReLU(), nn.ConvTranspose2d(in_channels=GENER_FILTERS, out_channels=output_shape[0], kernel_size=4, stride=2, padding=1), nn.Tanh() ) def forward(self, x): return self.pipe(x) def iterate_batches(envs, batch_size=BATCH_SIZE): batch = [e.reset() for e in envs] env_gen = iter(lambda: random.choice(envs), None) while True: e = next(env_gen) obs, reward, is_done, _ = e.step(e.action_space.sample()) if np.mean(obs) > 0.01: batch.append(obs) if len(batch) == batch_size: # Normalising input between -1 to 1 batch_np = np.array(batch, dtype=np.float32) * 2.0 / 255.0 - 1.0 yield 
torch.tensor(batch_np) batch.clear() if is_done: e.reset() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--cuda", default=False, action='store_true', help="Enable cuda computation") args = parser.parse_args(args={}) device = torch.device("cuda" if args.cuda else "cpu") envs = [ InputWrapper(gym.make(name)) for name in ('Breakout-v0', 'AirRaid-v0', 'Pong-v0') ] input_shape = envs[0].observation_space.shape net_discr = Discriminator(input_shape=input_shape).to(device) net_gener = Generator(output_shape=input_shape).to(device) objective = nn.BCELoss() gen_optimizer = optim.Adam( params=net_gener.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999)) dis_optimizer = optim.Adam( params=net_discr.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999)) writer = SummaryWriter() gen_losses = [] dis_losses = [] iter_no = 0 true_labels_v = torch.ones(BATCH_SIZE, device=device) fake_labels_v = torch.zeros(BATCH_SIZE, device=device) for batch_v in iterate_batches(envs): # fake samples, input is 4D: batch, filters, x, y gen_input_v = torch.FloatTensor( BATCH_SIZE, LATENT_VECTOR_SIZE, 1, 1) gen_input_v.normal_(0, 1) gen_input_v = gen_input_v.to(device) batch_v = batch_v.to(device) gen_output_v = net_gener(gen_input_v) # train discriminator dis_optimizer.zero_grad() dis_output_true_v = net_discr(batch_v) dis_output_fake_v = net_discr(gen_output_v.detach()) dis_loss = objective(dis_output_true_v, true_labels_v) + \ objective(dis_output_fake_v, fake_labels_v) dis_loss.backward() dis_optimizer.step() dis_losses.append(dis_loss.item()) # train generator gen_optimizer.zero_grad() dis_output_v = net_discr(gen_output_v) gen_loss_v = objective(dis_output_v, true_labels_v) gen_loss_v.backward() gen_optimizer.step() gen_losses.append(gen_loss_v.item()) iter_no += 1 if iter_no % REPORT_EVERY_ITER == 0: log.info("Iter %d: gen_loss=%.3e, dis_loss=%.3e", iter_no, np.mean(gen_losses), np.mean(dis_losses)) writer.add_scalar( "gen_loss", np.mean(gen_losses), iter_no) writer.add_scalar( "dis_loss", np.mean(dis_losses), iter_no) gen_losses = [] dis_losses = [] if iter_no % SAVE_IMAGE_EVERY_ITER == 0: writer.add_image("fake", vutils.make_grid( gen_output_v.data[:64], normalize=True), iter_no) writer.add_image("real", vutils.make_grid( batch_v.data[:64], normalize=True), iter_no)
INFO: Making new env: Breakout-v0 INFO: Making new env: AirRaid-v0 INFO: Making new env: Pong-v0 INFO: Iter 100: gen_loss=5.454e+00, dis_loss=5.009e-02 INFO: Iter 200: gen_loss=7.054e+00, dis_loss=4.306e-03 INFO: Iter 300: gen_loss=7.568e+00, dis_loss=2.140e-03 INFO: Iter 400: gen_loss=7.842e+00, dis_loss=1.272e-03 INFO: Iter 500: gen_loss=8.155e+00, dis_loss=1.019e-03 INFO: Iter 600: gen_loss=8.442e+00, dis_loss=6.918e-04 INFO: Iter 700: gen_loss=8.560e+00, dis_loss=5.483e-04 INFO: Iter 800: gen_loss=9.014e+00, dis_loss=4.792e-04 INFO: Iter 900: gen_loss=7.517e+00, dis_loss=2.132e-01 INFO: Iter 1000: gen_loss=7.375e+00, dis_loss=1.050e-01 INFO: Iter 1100: gen_loss=6.722e+00, dis_loss=1.718e-02 INFO: Iter 1200: gen_loss=6.346e+00, dis_loss=6.303e-03 INFO: Iter 1300: gen_loss=6.636e+00, dis_loss=6.348e-03 INFO: Iter 1400: gen_loss=6.612e+00, dis_loss=7.664e-02 INFO: Iter 1500: gen_loss=6.028e+00, dis_loss=7.801e-03 INFO: Iter 1600: gen_loss=6.665e+00, dis_loss=3.651e-03 INFO: Iter 1700: gen_loss=7.290e+00, dis_loss=5.616e-02 INFO: Iter 1800: gen_loss=6.314e+00, dis_loss=7.723e-02 INFO: Iter 1900: gen_loss=5.940e+00, dis_loss=3.784e-01 INFO: Iter 2000: gen_loss=5.053e+00, dis_loss=2.623e-01 INFO: Iter 2100: gen_loss=5.465e+00, dis_loss=9.114e-02 INFO: Iter 2200: gen_loss=5.480e+00, dis_loss=3.963e-01 INFO: Iter 2300: gen_loss=4.549e+00, dis_loss=2.361e-01 INFO: Iter 2400: gen_loss=5.407e+00, dis_loss=1.310e-01 INFO: Iter 2500: gen_loss=5.766e+00, dis_loss=5.550e-02 INFO: Iter 2600: gen_loss=5.816e+00, dis_loss=1.418e-01 INFO: Iter 2700: gen_loss=6.737e+00, dis_loss=5.231e-02 INFO: Iter 2800: gen_loss=7.147e+00, dis_loss=1.491e-01 INFO: Iter 2900: gen_loss=6.541e+00, dis_loss=2.155e-02 INFO: Iter 3000: gen_loss=7.072e+00, dis_loss=1.127e-01 INFO: Iter 3100: gen_loss=6.137e+00, dis_loss=6.138e-02 INFO: Iter 3200: gen_loss=7.406e+00, dis_loss=3.540e-02 INFO: Iter 3300: gen_loss=7.850e+00, dis_loss=5.691e-03 INFO: Iter 3400: gen_loss=8.614e+00, dis_loss=7.228e-03 INFO: Iter 3500: gen_loss=8.885e+00, dis_loss=3.191e-03 INFO: Iter 3600: gen_loss=5.367e+00, dis_loss=5.296e-01 INFO: Iter 3700: gen_loss=4.176e+00, dis_loss=3.335e-01 INFO: Iter 3800: gen_loss=5.174e+00, dis_loss=2.732e-01 INFO: Iter 3900: gen_loss=5.492e+00, dis_loss=1.298e-01 INFO: Iter 4000: gen_loss=6.570e+00, dis_loss=1.961e-02 INFO: Iter 4100: gen_loss=7.011e+00, dis_loss=2.517e-02 INFO: Iter 4200: gen_loss=8.362e+00, dis_loss=4.330e-03 INFO: Iter 4300: gen_loss=6.908e+00, dis_loss=2.161e-01 INFO: Iter 4400: gen_loss=5.226e+00, dis_loss=2.762e-01 INFO: Iter 4500: gen_loss=4.998e+00, dis_loss=2.893e-01 INFO: Iter 4600: gen_loss=5.078e+00, dis_loss=3.962e-01 INFO: Iter 4700: gen_loss=4.886e+00, dis_loss=1.932e-01 INFO: Iter 4800: gen_loss=6.110e+00, dis_loss=7.615e-02 INFO: Iter 4900: gen_loss=5.402e+00, dis_loss=1.634e-01 INFO: Iter 5000: gen_loss=5.336e+00, dis_loss=1.919e-01 INFO: Iter 5100: gen_loss=5.749e+00, dis_loss=8.817e-02 INFO: Iter 5200: gen_loss=5.879e+00, dis_loss=1.182e-01 INFO: Iter 5300: gen_loss=5.417e+00, dis_loss=1.651e-01 INFO: Iter 5400: gen_loss=6.747e+00, dis_loss=3.846e-02 INFO: Iter 5500: gen_loss=5.133e+00, dis_loss=1.996e-01 INFO: Iter 5600: gen_loss=6.116e+00, dis_loss=2.946e-01 INFO: Iter 5700: gen_loss=5.858e+00, dis_loss=2.152e-02
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
Ignite
import random import argparse import cv2 import torch import torch.nn as nn import torch.optim as optim from ignite.engine import Engine, Events from ignite.metrics import RunningAverage from ignite.contrib.handlers import tensorboard_logger as tb_logger import torchvision.utils as vutils import gym import gym.spaces import numpy as np log = gym.logger log.set_level(gym.logger.INFO) LATENT_VECTOR_SIZE = 100 DISCR_FILTERS = 64 GENER_FILTERS = 64 BATCH_SIZE = 16 # dimension input image will be rescaled IMAGE_SIZE = 64 LEARNING_RATE = 0.0001 REPORT_EVERY_ITER = 100 SAVE_IMAGE_EVERY_ITER = 1000 class InputWrapper(gym.ObservationWrapper): """ Preprocessing of input numpy array: 1. resize image into predefined size 2. move color channel axis to a first place """ def __init__(self, *args): super(InputWrapper, self).__init__(*args) assert isinstance(self.observation_space, gym.spaces.Box) old_space = self.observation_space self.observation_space = gym.spaces.Box(self.observation(old_space.low), self.observation(old_space.high), dtype=np.float32) def observation(self, observation): # resize image new_obs = cv2.resize(observation, (IMAGE_SIZE, IMAGE_SIZE)) # transform (210, 160, 3) -> (3, 210, 160) new_obs = np.moveaxis(new_obs, 2, 0) return new_obs.astype(np.float32) class Discriminator(nn.Module): def __init__(self, input_shape): super(Discriminator, self).__init__() # this pipe converges image into the single number self.conv_pipe = nn.Sequential( nn.Conv2d(in_channels=input_shape[0], out_channels=DISCR_FILTERS, kernel_size=4, stride=2, padding=1), nn.ReLU(), nn.Conv2d(in_channels=DISCR_FILTERS, out_channels=DISCR_FILTERS*2, kernel_size=4, stride=2, padding=1), nn.BatchNorm2d(DISCR_FILTERS*2), nn.ReLU(), nn.Conv2d(in_channels=DISCR_FILTERS * 2, out_channels=DISCR_FILTERS * 4, kernel_size=4, stride=2, padding=1), nn.BatchNorm2d(DISCR_FILTERS * 4), nn.ReLU(), nn.Conv2d(in_channels=DISCR_FILTERS * 4, out_channels=DISCR_FILTERS * 8, kernel_size=4, stride=2, padding=1), nn.BatchNorm2d(DISCR_FILTERS * 8), nn.ReLU(), nn.Conv2d(in_channels=DISCR_FILTERS * 8, out_channels=1, kernel_size=4, stride=1, padding=0), nn.Sigmoid() ) def forward(self, x): conv_out = self.conv_pipe(x) return conv_out.view(-1, 1).squeeze(dim=1) class Generator(nn.Module): def __init__(self, output_shape): super(Generator, self).__init__() # pipe deconvolves input vector into (3, 64, 64) image self.pipe = nn.Sequential( nn.ConvTranspose2d(in_channels=LATENT_VECTOR_SIZE, out_channels=GENER_FILTERS * 8, kernel_size=4, stride=1, padding=0), nn.BatchNorm2d(GENER_FILTERS * 8), nn.ReLU(), nn.ConvTranspose2d(in_channels=GENER_FILTERS * 8, out_channels=GENER_FILTERS * 4, kernel_size=4, stride=2, padding=1), nn.BatchNorm2d(GENER_FILTERS * 4), nn.ReLU(), nn.ConvTranspose2d(in_channels=GENER_FILTERS * 4, out_channels=GENER_FILTERS * 2, kernel_size=4, stride=2, padding=1), nn.BatchNorm2d(GENER_FILTERS * 2), nn.ReLU(), nn.ConvTranspose2d(in_channels=GENER_FILTERS * 2, out_channels=GENER_FILTERS, kernel_size=4, stride=2, padding=1), nn.BatchNorm2d(GENER_FILTERS), nn.ReLU(), nn.ConvTranspose2d(in_channels=GENER_FILTERS, out_channels=output_shape[0], kernel_size=4, stride=2, padding=1), nn.Tanh() ) def forward(self, x): return self.pipe(x) def iterate_batches(envs, batch_size=BATCH_SIZE): batch = [e.reset() for e in envs] env_gen = iter(lambda: random.choice(envs), None) while True: e = next(env_gen) obs, reward, is_done, _ = e.step(e.action_space.sample()) if np.mean(obs) > 0.01: batch.append(obs) if len(batch) == batch_size: # Normalising input 
between -1 to 1 batch_np = np.array(batch, dtype=np.float32) * 2.0 / 255.0 - 1.0 yield torch.tensor(batch_np) batch.clear() if is_done: e.reset() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--cuda", default=False, action='store_true', help="Enable cuda computation") args = parser.parse_args(args={}) device = torch.device("cuda" if args.cuda else "cpu") # envs = [InputWrapper(gym.make(name)) for name in ('Breakout-v0', 'AirRaid-v0', 'Pong-v0')] envs = [InputWrapper(gym.make(name)) for name in ['Breakout-v0']] input_shape = envs[0].observation_space.shape net_discr = Discriminator(input_shape=input_shape).to(device) net_gener = Generator(output_shape=input_shape).to(device) objective = nn.BCELoss() gen_optimizer = optim.Adam(params=net_gener.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999)) dis_optimizer = optim.Adam(params=net_discr.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999)) true_labels_v = torch.ones(BATCH_SIZE, device=device) fake_labels_v = torch.zeros(BATCH_SIZE, device=device) def process_batch(trainer, batch): gen_input_v = torch.FloatTensor( BATCH_SIZE, LATENT_VECTOR_SIZE, 1, 1) gen_input_v.normal_(0, 1) gen_input_v = gen_input_v.to(device) batch_v = batch.to(device) gen_output_v = net_gener(gen_input_v) # train discriminator dis_optimizer.zero_grad() dis_output_true_v = net_discr(batch_v) dis_output_fake_v = net_discr(gen_output_v.detach()) dis_loss = objective(dis_output_true_v, true_labels_v) + \ objective(dis_output_fake_v, fake_labels_v) dis_loss.backward() dis_optimizer.step() # train generator gen_optimizer.zero_grad() dis_output_v = net_discr(gen_output_v) gen_loss = objective(dis_output_v, true_labels_v) gen_loss.backward() gen_optimizer.step() if trainer.state.iteration % SAVE_IMAGE_EVERY_ITER == 0: fake_img = vutils.make_grid( gen_output_v.data[:64], normalize=True) trainer.tb.writer.add_image( "fake", fake_img, trainer.state.iteration) real_img = vutils.make_grid( batch_v.data[:64], normalize=True) trainer.tb.writer.add_image( "real", real_img, trainer.state.iteration) trainer.tb.writer.flush() return dis_loss.item(), gen_loss.item() engine = Engine(process_batch) tb = tb_logger.TensorboardLogger(log_dir=None) engine.tb = tb RunningAverage(output_transform=lambda out: out[1]).\ attach(engine, "avg_loss_gen") RunningAverage(output_transform=lambda out: out[0]).\ attach(engine, "avg_loss_dis") handler = tb_logger.OutputHandler(tag="train", metric_names=['avg_loss_gen', 'avg_loss_dis']) tb.attach(engine, log_handler=handler, event_name=Events.ITERATION_COMPLETED) @engine.on(Events.ITERATION_COMPLETED) def log_losses(trainer): if trainer.state.iteration % REPORT_EVERY_ITER == 0: log.info("%d: gen_loss=%f, dis_loss=%f", trainer.state.iteration, trainer.state.metrics['avg_loss_gen'], trainer.state.metrics['avg_loss_dis']) engine.run(data=iterate_batches(envs))
INFO: Making new env: Breakout-v0 INFO: 100: gen_loss=5.327549, dis_loss=0.200626 INFO: 200: gen_loss=6.850880, dis_loss=0.028281 INFO: 300: gen_loss=7.435633, dis_loss=0.004672 INFO: 400: gen_loss=7.708136, dis_loss=0.001331 INFO: 500: gen_loss=8.000729, dis_loss=0.000699 INFO: 600: gen_loss=8.314868, dis_loss=0.000474 INFO: 700: gen_loss=8.620416, dis_loss=0.000328 INFO: 800: gen_loss=8.779677, dis_loss=0.000286 INFO: 900: gen_loss=8.907359, dis_loss=0.000267 INFO: 1000: gen_loss=6.822098, dis_loss=0.558477 INFO: 1100: gen_loss=6.491067, dis_loss=0.079029 INFO: 1200: gen_loss=6.794054, dis_loss=0.012632 INFO: 1300: gen_loss=7.230944, dis_loss=0.002747 INFO: 1400: gen_loss=7.698738, dis_loss=0.000962 INFO: 1500: gen_loss=8.162801, dis_loss=0.000497 INFO: 1600: gen_loss=8.546710, dis_loss=0.000326 INFO: 1700: gen_loss=8.939020, dis_loss=0.000224 INFO: 1800: gen_loss=9.027502, dis_loss=0.000216 INFO: 1900: gen_loss=9.230650, dis_loss=0.000172 INFO: 2000: gen_loss=9.495270, dis_loss=0.000129 INFO: 2100: gen_loss=9.700210, dis_loss=0.000104 INFO: 2200: gen_loss=9.862649, dis_loss=0.000086 INFO: 2300: gen_loss=10.042667, dis_loss=0.000075 INFO: 2400: gen_loss=10.333560, dis_loss=0.000052 INFO: 2500: gen_loss=10.437976, dis_loss=0.000045 INFO: 2600: gen_loss=10.592011, dis_loss=0.000040 INFO: 2700: gen_loss=10.633485, dis_loss=0.000039 INFO: 2800: gen_loss=10.627324, dis_loss=0.000036 INFO: 2900: gen_loss=10.665850, dis_loss=0.000036 INFO: 3000: gen_loss=10.712931, dis_loss=0.000036 INFO: 3100: gen_loss=10.853663, dis_loss=0.000030 INFO: 3200: gen_loss=10.868406, dis_loss=0.000030 INFO: 3300: gen_loss=10.904878, dis_loss=0.000027 INFO: 3400: gen_loss=11.031057, dis_loss=0.000022 INFO: 3500: gen_loss=11.114413, dis_loss=0.000022 INFO: 3600: gen_loss=11.334650, dis_loss=0.000018 INFO: 3700: gen_loss=11.537755, dis_loss=0.000013 INFO: 3800: gen_loss=11.573673, dis_loss=0.000015 INFO: 3900: gen_loss=11.594438, dis_loss=0.000013 INFO: 4000: gen_loss=11.650991, dis_loss=0.000012 INFO: 4100: gen_loss=11.350557, dis_loss=0.000023 INFO: 4200: gen_loss=11.715774, dis_loss=0.000012 INFO: 4300: gen_loss=11.970108, dis_loss=0.000008 INFO: 4400: gen_loss=12.142686, dis_loss=0.000007 INFO: 4500: gen_loss=12.200508, dis_loss=0.000007 INFO: 4600: gen_loss=12.209455, dis_loss=0.000006 INFO: 4700: gen_loss=12.215595, dis_loss=0.000007 INFO: 4800: gen_loss=12.352226, dis_loss=0.000006 INFO: 4900: gen_loss=12.434466, dis_loss=0.000006 INFO: 5000: gen_loss=12.517082, dis_loss=0.000005 INFO: 5100: gen_loss=12.604175, dis_loss=0.000005 INFO: 5200: gen_loss=12.744095, dis_loss=0.000004 INFO: 5300: gen_loss=12.880165, dis_loss=0.000004 INFO: 5400: gen_loss=12.999031, dis_loss=0.000003
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
Render environments in Colab Alternative 1 It is possible to visualize the game your agent is playing, even on Colab. This section provides information on how to generate a video in Colab that shows you an episode of the game your agent is playing. This video process is based on suggestions found [here](https://colab.research.google.com/drive/1flu31ulJlgiRL1dnN2ir8wGh9p7Zij2t). Begin by installing **pyvirtualdisplay** and **python-opengl**.
!pip install gym pyvirtualdisplay > /dev/null 2>&1 !apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1 !apt-get update > /dev/null 2>&1 !apt-get install cmake > /dev/null 2>&1 !pip install --upgrade setuptools 2>&1 !pip install ez_setup > /dev/null 2>&1 !pip install gym[atari] > /dev/null 2>&1 !wget http://www.atarimania.com/roms/Roms.rar !mkdir /content/ROM/ !unrar e /content/Roms.rar /content/ROM/ !python -m atari_py.import_roms /content/ROM/ import gym from gym.wrappers import Monitor import glob import io import base64 from IPython.display import HTML from pyvirtualdisplay import Display from IPython import display as ipythondisplay display = Display(visible=0, size=(1400, 900)) display.start() """ Utility functions to enable video recording of gym environment and displaying it. To enable video, just do "env = wrap_env(env)"" """ def show_video(): mp4list = glob.glob('video/*.mp4') if len(mp4list) > 0: mp4 = mp4list[0] video = io.open(mp4, 'r+b').read() encoded = base64.b64encode(video) ipythondisplay.display(HTML(data='''<video alt="test" autoplay loop controls style="height: 400px;"> <source src="data:video/mp4;base64,{0}" type="video/mp4" /> </video>'''.format(encoded.decode('ascii')))) else: print("Could not find video") def wrap_env(env): env = Monitor(env, './video', force=True) return env #env = wrap_env(gym.make("MountainCar-v0")) env = wrap_env(gym.make("Atlantis-v0")) observation = env.reset() while True: env.render() #your agent goes here action = env.action_space.sample() observation, reward, done, info = env.step(action) if done: break; env.close() show_video()
_____no_output_____
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
Alternative 2
!apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1 !pip install colabgymrender import gym from colabgymrender.recorder import Recorder env = gym.make("Breakout-v0") directory = './video' env = Recorder(env, directory) observation = env.reset() terminal = False while not terminal: action = env.action_space.sample() observation, reward, terminal, info = env.step(action) env.play()
_____no_output_____
Apache-2.0
_docs/nbs/T726861-Introduction-to-Gym-toolkit.ipynb
sparsh-ai/recohut
Implementation of Stack Stack Attributes and MethodsBefore we implement our own Stack class, let's review the properties and methods of a Stack.The stack abstract data type is defined by the following structure and operations. A stack is structured, as described above, as an ordered collection of items where items are added to and removed from the end called the “top.” Stacks are ordered LIFO. The stack operations are given below.* Stack() creates a new stack that is empty. It needs no parameters and returns an empty stack.* push(item) adds a new item to the top of the stack. It needs the item and returns nothing.* pop() removes the top item from the stack. It needs no parameters and returns the item. The stack is modified.* peek() returns the top item from the stack but does not remove it. It needs no parameters. The stack is not modified.* isEmpty() tests to see whether the stack is empty. It needs no parameters and returns a boolean value.* size() returns the number of items on the stack. It needs no parameters and returns an integer. ____ Stack Implementation
class Stack: def __init__(self): self.items = [] def isEmpty(self): return self.items == [] def push(self, item): self.items.append(item) def pop(self): return self.items.pop() def peek(self): return self.items[len(self.items)-1] def size(self): return len(self.items)
_____no_output_____
MIT
code/algorithms/course_udemy_1/Stacks, Queues and Deques/Implementation of Stack.ipynb
vicb1/miscellaneous
Let's try it out!
s = Stack() print(s.isEmpty()) s.push(1) s.push('two') s.peek() s.push(True) s.size() s.isEmpty() s.pop() s.pop() s.size() s.pop() s.isEmpty()
_____no_output_____
MIT
code/algorithms/course_udemy_1/Stacks, Queues and Deques/Implementation of Stack.ipynb
vicb1/miscellaneous
| Name | Description | Date| :- |-------------: | :-:|__Reza Hashemi__| __Function approximation by linear model and deep network LOOP test__. | __On 10th of August 2019__ Function approximation with linear models and neural networks * Are linear models sufficient for approximating transcendental functions? What about polynomial functions? * Do neural networks perform better in those cases? * Does the depth of the neural network matter? Import basic libraries
import numpy as np import pandas as pd import math import matplotlib.pyplot as plt %matplotlib inline
_____no_output_____
BSD-2-Clause
Function Approximation by Neural Network/Function approximation by linear model and deep network LOOP test.ipynb
rezapci/Machine-Learning
Global variables for the program
N_points = 100 # Number of points for constructing function x_min = 1 # Min of the range of x (feature) x_max = 25 # Max of the range of x (feature) noise_mean = 0 # Mean of the Gaussian noise adder noise_sd = 10 # Std.Dev of the Gaussian noise adder test_set_fraction = 0.2
_____no_output_____
BSD-2-Clause
Function Approximation by Neural Network/Function approximation by linear model and deep network LOOP test.ipynb
rezapci/Machine-Learning
Generate feature and output vector for a non-linear function with transcendental terms The ground truth or originating function is as follows: $$ y=f(x)= (20x+3x^2+0.1x^3)\cdot\sin(x)\cdot e^{-0.1x}+\psi(x) $$ $$ \text{OR} $$ $$ y=f(x)= (20x+3x^2+0.1x^3)+\psi(x) $$ $$ \text{where } \psi(x) : {\displaystyle f(x\;|\;\mu ,\sigma ^{2})={\frac {1}{\sqrt {2\pi \sigma ^{2}}}}\;e^{-{\frac {(x-\mu )^{2}}{2\sigma ^{2}}}}} $$
# Definition of the function with exponential and sinusoidal terms def func_trans(x): result = (20*x+3*x**2+0.1*x**3)*np.sin(x)*np.exp(-0.1*x) return (result) # Definition of the function without exponential and sinusoidal terms i.e. just the polynomial def func_poly(x): result = 20*x+3*x**2+0.1*x**3 return (result) # Densely spaced points for generating the ideal functional curve x_smooth = np.array(np.linspace(x_min,x_max,501)) # Use one of the following y_smooth = func_trans(x_smooth) #y_smooth = func_poly(x_smooth) # Linearly spaced sample points X=np.array(np.linspace(x_min,x_max,N_points)) # Added observational/measurement noise noise_x = np.random.normal(loc=noise_mean,scale=noise_sd,size=N_points) # Observed output after adding the noise y = func_trans(X)+noise_x # Store the values in a DataFrame df = pd.DataFrame(data=X,columns=['X']) df['Ideal y']=df['X'].apply(func_trans) df['Sin_X']=df['X'].apply(math.sin) df['y']=y df.head()
_____no_output_____
BSD-2-Clause
Function Approximation by Neural Network/Function approximation by linear model and deep network LOOP test.ipynb
rezapci/Machine-Learning
Plot the function(s), both the ideal characteristic and the observed output (with process and observation noise)
df.plot.scatter('X','y',title='True process and measured samples\n', grid=True,edgecolors=(0,0,0),c='blue',s=60,figsize=(10,6)) plt.plot(x_smooth,y_smooth,'k')
_____no_output_____
BSD-2-Clause
Function Approximation by Neural Network/Function approximation by linear model and deep network LOOP test.ipynb
rezapci/Machine-Learning
Import scikit-learn libraries and prepare train/test splits
from sklearn.linear_model import LinearRegression from sklearn.linear_model import LassoCV from sklearn.linear_model import RidgeCV from sklearn.ensemble import AdaBoostRegressor from sklearn.preprocessing import PolynomialFeatures from sklearn.model_selection import train_test_split from sklearn.pipeline import make_pipeline X_train, X_test, y_train, y_test = train_test_split(df[['X','Sin_X']], df['y'], test_size=test_set_fraction) #X_train=X_train.reshape(X_train.size,1) #y_train=y_train.reshape(y_train.size,1) #X_test=X_test.reshape(X_test.size,1) #y_test=y_test.reshape(y_test.size,1) #X_train=X_train.reshape(-1,1) #y_train=y_train.reshape(-1,1) #X_test=X_test.reshape(-1,1) #y_test=y_test.reshape(-1,1) from sklearn import preprocessing X_scaled = preprocessing.scale(X_train) y_scaled = preprocessing.scale(y_train)
_____no_output_____
BSD-2-Clause
Function Approximation by Neural Network/Function approximation by linear model and deep network LOOP test.ipynb
rezapci/Machine-Learning
Polynomial model with LASSO/Ridge regularization (pipelined) with linearly spaced samples **This is an advanced machine learning method which prevents over-fitting by penalizing high-valued coefficients, i.e., keeping them bounded.**
# Regression model parameters ridge_alpha = tuple([10**(x) for x in range(-3,0,1) ]) # Alpha (regularization strength) of ridge regression # Alpha (regularization strength) of LASSO regression lasso_eps = 0.0001 lasso_nalpha=20 lasso_iter=5000 # Min and max degree of polynomials features to consider degree_min = 2 degree_max = 8 linear_sample_score = [] poly_degree = [] rmse=[] t_linear=[] import time for degree in range(degree_min,degree_max+1): t1=time.time() #model = make_pipeline(PolynomialFeatures(degree), RidgeCV(alphas=ridge_alpha,normalize=True,cv=5)) model = make_pipeline(PolynomialFeatures(degree), LassoCV(eps=lasso_eps,n_alphas=lasso_nalpha, max_iter=lasso_iter,normalize=True,cv=5)) #model = make_pipeline(PolynomialFeatures(degree), LinearRegression(normalize=True)) model.fit(X_train, y_train) t2=time.time() t = t2-t1 t_linear.append(t) test_pred = np.array(model.predict(X_test)) RMSE=np.sqrt(np.sum(np.square(test_pred-y_test))) test_score = model.score(X_test,y_test) linear_sample_score.append(test_score) rmse.append(RMSE) poly_degree.append(degree) #print("Test score of model with degree {}: {}\n".format(degree,test_score)) plt.figure() plt.title("Predicted vs. actual for polynomial of degree {}".format(degree),fontsize=15) plt.xlabel("Actual values") plt.ylabel("Predicted values") plt.scatter(y_test,test_pred) plt.plot(y_test,y_test,'r',lw=2) linear_sample_score plt.figure(figsize=(8,5)) plt.grid(True) plt.plot(poly_degree,rmse,lw=3,c='red') plt.title("Model complexity (highest polynomial degree) vs. test score\n",fontsize=20) plt.xlabel ("\nDegree of polynomial",fontsize=20) plt.ylabel ("Root-mean-square error on test set",fontsize=15) df_score = pd.DataFrame(data={'degree':[d for d in range(degree_min,degree_max+1)], 'Linear sample score':linear_sample_score}) # Save the best R^2 score r2_linear = max(linear_sample_score) print("Best R^2 score for linear polynomial degree models:",r2_linear) plt.figure(figsize=(8,5)) plt.grid(True) plt.plot(poly_degree,linear_sample_score,lw=3,c='red') plt.xlabel ("\nModel Complexity: Degree of polynomial",fontsize=20) plt.ylabel ("R^2 score on test set",fontsize=15)
_____no_output_____
BSD-2-Clause
Function Approximation by Neural Network/Function approximation by linear model and deep network LOOP test.ipynb
rezapci/Machine-Learning
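For reference, here is a sketch of the objective functions minimized by the two regularized estimators used above (standard definitions, roughly in scikit-learn's parameterization; the regularization strength $\lambda$ corresponds to the `alpha` values that `RidgeCV`/`LassoCV` pick by cross-validation):
```latex
\text{Ridge:}\quad \hat{\beta} = \arg\min_{\beta}\; \lVert y - X\beta \rVert_2^2 + \lambda \lVert \beta \rVert_2^2
\qquad
\text{LASSO:}\quad \hat{\beta} = \arg\min_{\beta}\; \tfrac{1}{2n}\lVert y - X\beta \rVert_2^2 + \lambda \lVert \beta \rVert_1
```
The L2 penalty shrinks all coefficients smoothly, while the L1 penalty can drive some polynomial coefficients exactly to zero, which is why LASSO also acts as a feature selector over the polynomial terms.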
1-hidden layer (Shallow) network
import tensorflow as tf

learning_rate = 1e-6
training_epochs = 150000

n_input = 1    # Number of features
n_output = 1   # Regression output is a number only

n_hidden_layer = 100  # layer number of features

weights = {
    'hidden_layer': tf.Variable(tf.random_normal([n_input, n_hidden_layer])),
    'out': tf.Variable(tf.random_normal([n_hidden_layer, n_output]))
}
biases = {
    'hidden_layer': tf.Variable(tf.random_normal([n_hidden_layer])),
    'out': tf.Variable(tf.random_normal([n_output]))
}

# tf Graph input
x = tf.placeholder("float32", [None, n_input])
y = tf.placeholder("float32", [None, n_output])

# Hidden layer with sinusoidal (sin) activation
layer_1 = tf.add(tf.matmul(x, weights['hidden_layer']), biases['hidden_layer'])
layer_1 = tf.sin(layer_1)

# Output layer with linear activation
ops = tf.add(tf.matmul(layer_1, weights['out']), biases['out'])

# Define loss and optimizer
cost = tf.reduce_mean(tf.squared_difference(ops, y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

from tqdm import tqdm
import time

# Initializing the variables
init = tf.global_variables_initializer()

# Empty lists for book-keeping purpose
epoch = 0
log_epoch = []
epoch_count = []
acc = []
loss_epoch = []

X_train, X_test, y_train, y_test = train_test_split(df['X'], df['y'], test_size=test_set_fraction)
X_train = X_train.reshape(X_train.size, 1)
y_train = y_train.reshape(y_train.size, 1)
X_test = X_test.reshape(X_test.size, 1)
y_test = y_test.reshape(y_test.size, 1)

# Launch the graph and time the session
t1 = time.time()
with tf.Session() as sess:
    sess.run(init)
    # Loop over epochs
    for epoch in tqdm(range(training_epochs)):
        # Run optimization process (backprop) and cost function (to get loss value)
        _, l = sess.run([optimizer, cost], feed_dict={x: X_train, y: y_train})
        loss_epoch.append(l)         # Save the loss for every epoch
        epoch_count.append(epoch+1)  # Save the epoch count
        # print("Epoch {}/{} finished. Loss: {}, Accuracy: {}".format(epoch+1,training_epochs,round(l,4),round(accu,4)))
        #print("Epoch {}/{} finished. Loss: {}".format(epoch+1,training_epochs,round(l,4)))
    w = sess.run(weights)
    b = sess.run(biases)
    yhat = sess.run(ops, feed_dict={x: X_test})
t2 = time.time()
time_SNN = t2-t1

plt.plot(loss_epoch)

# Total variance
SSt_SNN = np.sum(np.square(y_test-np.mean(y_test)))
# Residual sum of squares
SSr_SNN = np.sum(np.square(yhat-y_test))
# Root-mean-square error
RMSE_SNN = np.sqrt(np.sum(np.square(yhat-y_test)))
# R^2 coefficient
r2_SNN = 1-(SSr_SNN/SSt_SNN)
print("RMSE error of the shallow neural network:", RMSE_SNN)
print("R^2 value of the shallow neural network:", r2_SNN)

plt.figure(figsize=(10,6))
plt.title("Predicted vs. actual (test set) for shallow (1-hidden layer) neural network\n", fontsize=15)
plt.xlabel("Actual values (test set)")
plt.ylabel("Predicted values")
plt.scatter(y_test, yhat, edgecolors='k', s=100, c='green')
plt.grid(True)
plt.plot(y_test, y_test, 'r', lw=2)
_____no_output_____
BSD-2-Clause
Function Approximation by Neural Network/Function approximation by linear model and deep network LOOP test.ipynb
rezapci/Machine-Learning
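The block above uses the TensorFlow 1.x graph/Session API (`tf.placeholder`, `tf.Session`). As a point of comparison, a rough sketch of the same 1-hidden-layer regressor in the Keras API might look like the following; this assumes TF 2.x is available, mirrors the layer size and sinusoidal activation used above, and the toy data standing in for the notebook's `X_train`/`y_train` is purely illustrative:
```python
import numpy as np
import tensorflow as tf

# Minimal Keras sketch of the same 1-hidden-layer regressor (assumption: TF 2.x).
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, input_shape=(1,)),     # hidden layer, 100 units
    tf.keras.layers.Lambda(lambda t: tf.sin(t)),      # sinusoidal activation, as in the graph above
    tf.keras.layers.Dense(1)                          # linear output for regression
])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=1e-6), loss='mse')

# Hypothetical toy data in place of X_train / y_train from the notebook
X_demo = np.linspace(0, 10, 200).reshape(-1, 1).astype('float32')
y_demo = (np.sin(X_demo) + 0.1 * np.random.randn(*X_demo.shape)).astype('float32')

history = model.fit(X_demo, y_demo, epochs=10, verbose=0)
y_hat = model.predict(X_demo)
```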
Deep Neural Network for regression
Import and declaration of variables
import tensorflow as tf

learning_rate = 1e-6
training_epochs = 15000

n_input = 1    # Number of features
n_output = 1   # Regression output is a number only

n_hidden_layer_1 = 30  # Hidden layer 1
n_hidden_layer_2 = 30  # Hidden layer 2
_____no_output_____
BSD-2-Clause
Function Approximation by Neural Network/Function approximation by linear model and deep network LOOP test.ipynb
rezapci/Machine-Learning
Weights and bias variable
# Store layers weight & bias as Variables classes in dictionaries weights = { 'hidden_layer_1': tf.Variable(tf.random_normal([n_input, n_hidden_layer_1])), 'hidden_layer_2': tf.Variable(tf.random_normal([n_hidden_layer_1, n_hidden_layer_2])), 'out': tf.Variable(tf.random_normal([n_hidden_layer_2, n_output])) } biases = { 'hidden_layer_1': tf.Variable(tf.random_normal([n_hidden_layer_1])), 'hidden_layer_2': tf.Variable(tf.random_normal([n_hidden_layer_2])), 'out': tf.Variable(tf.random_normal([n_output])) }
_____no_output_____
BSD-2-Clause
Function Approximation by Neural Network/Function approximation by linear model and deep network LOOP test.ipynb
rezapci/Machine-Learning
Input data as placeholder
# tf Graph input
x = tf.placeholder("float32", [None, n_input])
y = tf.placeholder("float32", [None, n_output])
_____no_output_____
BSD-2-Clause
Function Approximation by Neural Network/Function approximation by linear model and deep network LOOP test.ipynb
rezapci/Machine-Learning
Hidden and output layers definition (using TensorFlow mathematical functions)
# Hidden layers with activation
layer_1 = tf.add(tf.matmul(x, weights['hidden_layer_1']), biases['hidden_layer_1'])
layer_1 = tf.sin(layer_1)

layer_2 = tf.add(tf.matmul(layer_1, weights['hidden_layer_2']), biases['hidden_layer_2'])
layer_2 = tf.nn.relu(layer_2)

# Output layer with linear activation
ops = tf.add(tf.matmul(layer_2, weights['out']), biases['out'])
_____no_output_____
BSD-2-Clause
Function Approximation by Neural Network/Function approximation by linear model and deep network LOOP test.ipynb
rezapci/Machine-Learning
Gradient descent optimizer for training (backpropagation)
For the training of the neural network we need to perform __backpropagation__, i.e. propagate the errors, calculated by this cost function, backwards through the layers all the way up to the input weights and biases in order to adjust them accordingly (minimize the error). This involves taking first-order derivatives of the activation functions and applying the chain rule to ___'multiply'___ the effect of the various layers as the error propagates back. You can read more on this here: [Backpropagation in Neural Network](https://en.wikipedia.org/wiki/Backpropagation)

Fortunately, TensorFlow already implicitly implements this step, i.e. it takes care of all the chained differentiations for us. All we need to do is to specify an Optimizer object and pass it the cost function. Here, we are using a Gradient Descent Optimizer.

Gradient descent is a first-order iterative optimization algorithm for finding the minimum of a function. To find a local minimum of a function using gradient descent, one takes steps proportional to the negative of the gradient (or of the approximate gradient) of the function at the current point. You can read more on this here: [Gradient Descent](https://en.wikipedia.org/wiki/Gradient_descent)
# Define loss and optimizer
cost = tf.reduce_mean(tf.squared_difference(ops, y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
_____no_output_____
BSD-2-Clause
Function Approximation by Neural Network/Function approximation by linear model and deep network LOOP test.ipynb
rezapci/Machine-Learning
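To make the update rule concrete, here is a minimal, self-contained sketch of gradient descent on a single-parameter least-squares problem. It is pure NumPy and independent of the TensorFlow graph above; the toy data, learning rate, and number of steps are illustrative assumptions:
```python
import numpy as np

# Toy data: y = 3x plus noise; we fit a single weight w by gradient descent.
rng = np.random.default_rng(0)
x = rng.uniform(-1, 1, size=100)
y = 3.0 * x + 0.1 * rng.normal(size=100)

w = 0.0              # initial guess
learning_rate = 0.1

for step in range(200):
    y_pred = w * x
    # Derivative of the mean squared error with respect to w
    grad = np.mean(2 * (y_pred - y) * x)
    # Gradient descent update: step against the gradient
    w -= learning_rate * grad

print(w)  # should end up close to 3.0
```
This is exactly what `GradientDescentOptimizer(...).minimize(cost)` automates for every weight and bias in the graph, with the gradients obtained by backpropagation instead of a hand-written derivative.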
TensorFlow Session for training and loss estimation
from tqdm import tqdm
import time

# Initializing the variables
init = tf.global_variables_initializer()

# Empty lists for book-keeping purpose
epoch = 0
log_epoch = []
epoch_count = []
acc = []
loss_epoch = []

r2_DNN = []
test_size = []

for i in range(5):
    X_train, X_test, y_train, y_test = train_test_split(df['X'], df['y'], test_size=test_set_fraction)
    X_train = X_train.reshape(X_train.size, 1)
    y_train = y_train.reshape(y_train.size, 1)
    X_test = X_test.reshape(X_test.size, 1)
    y_test = y_test.reshape(y_test.size, 1)

    # Launch the graph and time the session
    with tf.Session() as sess:
        sess.run(init)
        # Loop over epochs
        for epoch in tqdm(range(training_epochs)):
            # Run optimization process (backprop) and cost function (to get loss value)
            #r1 = int(epoch/10000)
            #learning_rate = learning_rate-r1*3e-6
            #optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
            _, l = sess.run([optimizer, cost], feed_dict={x: X_train, y: y_train})
        yhat = sess.run(ops, feed_dict={x: X_test})

    #test_size.append(0.5-(i*0.04))

    # Total variance
    SSt_DNN = np.sum(np.square(y_test-np.mean(y_test)))
    # Residual sum of squares
    SSr_DNN = np.sum(np.square(yhat-y_test))
    # Root-mean-square error
    RMSE_DNN = np.sqrt(np.sum(np.square(yhat-y_test)))
    # R^2 coefficient
    r2 = 1-(SSr_DNN/SSt_DNN)
    r2_DNN.append(r2)
    print("Run: {} finished. Score: {}".format(i+1, r2))
C:\Users\Tirtha\Python\Anaconda3\lib\site-packages\ipykernel_launcher.py:19: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead C:\Users\Tirtha\Python\Anaconda3\lib\site-packages\ipykernel_launcher.py:20: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead C:\Users\Tirtha\Python\Anaconda3\lib\site-packages\ipykernel_launcher.py:21: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead C:\Users\Tirtha\Python\Anaconda3\lib\site-packages\ipykernel_launcher.py:22: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead 100%|██████████████████████████████████████████████████████████████████████████| 15000/15000 [00:12<00:00, 1184.93it/s]
BSD-2-Clause
Function Approximation by Neural Network/Function approximation by linear model and deep network LOOP test.ipynb
rezapci/Machine-Learning
Plot R2 score cross-validation results
plt.figure(figsize=(10,6))
plt.title("\nR2-score for cross-validation runs of \ndeep (2-layer) neural network\n", fontsize=25)
plt.xlabel("\nCross-validation run with random test/train split #", fontsize=15)
plt.ylabel("R2 score (test set)\n", fontsize=15)
plt.scatter([i+1 for i in range(5)], r2_DNN, edgecolors='k', s=100, c='green')
plt.grid(True)
_____no_output_____
BSD-2-Clause
Function Approximation by Neural Network/Function approximation by linear model and deep network LOOP test.ipynb
rezapci/Machine-Learning
Controlling Flow with Conditional Statements
Now that you've learned how to create conditional statements, let's learn how to use them to control the flow of our programs. This is done with `if`, `elif`, and `else` statements.

The `if` Statement
What if we wanted to check whether a number is divisible by 2 and, if so, print that number out? Let's diagram that out.
![image.png](attachment:image.png)
- Check to see if A is even
- If yes, then print our message: "A is even"

This use case can be translated into an "if" statement. I'm going to write this out in pseudocode, which looks very similar to Python.
```text
if A is even:
    print "A is even"
```
# Let's translate this into Python code
def check_evenness(A):
    if A % 2 == 0:
        print(f"A ({A:02}) is even!")

for i in range(1, 11):
    check_evenness(i)

# You can do multiple if statements and they're executed sequentially
A = 10

if A > 0:
    print('A is positive')
if A % 2 == 0:
    print('A is even!')
A is positive
A is even!
MIT
Lecture Material/07_Conditional_Logic_and_Control_Flow/07.3_ControllingFlowWithConditionalStatements.ipynb
knherrera/pcc-cis-012-intro-to-programming-python
The `else` Statement
But what if we wanted to know if the number was even OR odd? Let's diagram that out:
![image.png](attachment:image.png)

Again, translating this to pseudocode, we're going to use the 'else' statement:
```text
if A is even:
    print "A is even"
else:
    print "A is odd"
```
# Let's translate this into Python code
def check_evenness(A):
    if A % 2 == 0:
        print(f"A ({A:02}) is even!")
    else:
        print(f'A ({A:02}) is odd!')

for i in range(1, 11):
    check_evenness(i)
A (01) is odd!
A (02) is even!
A (03) is odd!
A (04) is even!
A (05) is odd!
A (06) is even!
A (07) is odd!
A (08) is even!
A (09) is odd!
A (10) is even!
MIT
Lecture Material/07_Conditional_Logic_and_Control_Flow/07.3_ControllingFlowWithConditionalStatements.ipynb
knherrera/pcc-cis-012-intro-to-programming-python
The 'else if' or `elif` Statement
What if we wanted to check if A is divisible by 2 or 3? Let's diagram that out:
![image.png](attachment:image.png)

Again, translating this into pseudocode, we're going to use the 'else if' statement.
```text
if A is divisible by 2:
    print "2 divides A"
else if A is divisible by 3:
    print "3 divides A"
else:
    print "2 and 3 don't divide A"
```
# Let's translate this into Python code
def check_divisible_by_2_and_3(A):
    if A % 2 == 0:
        print(f"2 divides A ({A:02})!")
    # else if in Python is elif
    elif A % 3 == 0:
        print(f'3 divides A ({A:02})!')
    else:
        print(f'A ({A:02}) is not divisible by 2 or 3)')

for i in range(1, 11):
    check_divisible_by_2_and_3(i)
A (01) is not divisible by 2 or 3)
2 divides A (02)!
3 divides A (03)!
2 divides A (04)!
A (05) is not divisible by 2 or 3)
2 divides A (06)!
A (07) is not divisible by 2 or 3)
2 divides A (08)!
3 divides A (09)!
2 divides A (10)!
MIT
Lecture Material/07_Conditional_Logic_and_Control_Flow/07.3_ControllingFlowWithConditionalStatements.ipynb
knherrera/pcc-cis-012-intro-to-programming-python
Order Matters
When chaining conditionals, you need to be careful how you order them. For example, what if we wanted to check if a number is divisible by 2, 3, or both:
![image.png](attachment:image.png)
# Let's translate this into Python code
def check_divisible_by_2_and_3(A):
    if A % 2 == 0:
        print(f"2 divides A ({A:02})!")
    elif A % 3 == 0:
        print(f'3 divides A ({A:02})!')
    elif A % 2 == 0 and A % 3 == 0:
        print(f'2 and 3 divides A ({A:02})!')
    else:
        print(f"2 or 3 doesn't divide A ({A:02})")

for i in range(1, 11):
    check_divisible_by_2_and_3(i)
2 or 3 doesn't divide A (01)
2 divides A (02)!
3 divides A (03)!
2 divides A (04)!
2 or 3 doesn't divide A (05)
2 divides A (06)!
2 or 3 doesn't divide A (07)
2 divides A (08)!
3 divides A (09)!
2 divides A (10)!
MIT
Lecture Material/07_Conditional_Logic_and_Control_Flow/07.3_ControllingFlowWithConditionalStatements.ipynb
knherrera/pcc-cis-012-intro-to-programming-python
Wait! We would expect 6, which is divisible by both 2 and 3, to show that! Looking back at the graphic, we can see that the flow checks for divisibility by 2 first, and since that's true we follow that path first. Let's make a correction to our diagram to fix this:
![image.png](attachment:image.png)
# Let's translate this into Python code
def check_divisible_by_2_and_3(A):
    if A % 2 == 0 and A % 3 == 0:
        print(f'2 and 3 divides A ({A:02})!')
    elif A % 3 == 0:
        print(f'3 divides A ({A:02})!')
    elif A % 2 == 0:
        print(f"2 divides A ({A:02})!")
    else:
        print(f"2 or 3 doesn't divide A ({A:02})")

for i in range(1, 11):
    check_divisible_by_2_and_3(i)
2 or 3 doesn't divide A (01)
2 divides A (02)!
3 divides A (03)!
2 divides A (04)!
2 or 3 doesn't divide A (05)
2 and 3 divides A (06)!
2 or 3 doesn't divide A (07)
2 divides A (08)!
3 divides A (09)!
2 divides A (10)!
MIT
Lecture Material/07_Conditional_Logic_and_Control_Flow/07.3_ControllingFlowWithConditionalStatements.ipynb
knherrera/pcc-cis-012-intro-to-programming-python
**NOTE:** Always put your most restrictive conditional at the top of your if statements and then work your way down to the least restrictive.
![image.png](attachment:image.png)

In-Class Assignments
- Create a function that takes two input variables `A` and `divisor`. Check if `divisor` divides into `A`. If it does, print `" is divided by "`. Don't forget about the `in` operator that checks if a substring is in another string.
- Create a function that takes an input variable `A`, which is a string. Check if `A` has the substring `apple`, `peach`, or `blueberry` in it. Print out which of these are found within the string. Note: you could do this using just if/elif/else statements, but is there a better way using lists, for loops, and if/elif/else statements?

Solutions
def is_divisible(A, divisor):
    if A % divisor == 0:
        print(f'{A} is divided by {divisor}')

A = 37
# this is actually a crude way to find if the number is prime
for i in range(2, int(A / 2)):
    is_divisible(A, i)

# notice that nothing was printed? That's because 37 is prime
B = 27
for i in range(2, int(B / 2)):
    is_divisible(B, i)

# this is ONE solution. There are more out there and probably better
# one too
def check_for_fruit(A):
    found_fruit = []
    if 'apple' in A:
        found_fruit.append('apple')
    if 'peach' in A:
        found_fruit.append('peach')
    if 'blueberry' in A:
        found_fruit.append('blueberry')

    found_fruit_str = ''
    for fruit in found_fruit:
        found_fruit_str += fruit
        found_fruit_str += ', '

    if len(found_fruit) > 0:
        print(found_fruit_str + ' is found within the string')
    else:
        print('No fruit found in the string')

check_for_fruit('there are apples and peaches in this pie')
apple, peach, is found within the string
MIT
Lecture Material/07_Conditional_Logic_and_Control_Flow/07.3_ControllingFlowWithConditionalStatements.ipynb
knherrera/pcc-cis-012-intro-to-programming-python
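Picking up the "is there a better way" hint from the assignment, one possible variant of the fruit check keeps the fruit names in a list and uses a single loop instead of three separate if statements. This is only a sketch of an alternative, not the official solution:
```python
def check_for_fruit(A):
    # Keeping the fruit names in a list makes adding a new fruit a one-line change
    fruits = ['apple', 'peach', 'blueberry']
    found_fruit = [fruit for fruit in fruits if fruit in A]

    if found_fruit:
        print(', '.join(found_fruit) + ' is found within the string')
    else:
        print('No fruit found in the string')

check_for_fruit('there are apples and peaches in this pie')
```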
Example 4.1Let \{0,1,2,3\} denote the actions \{up, right, down, left\} respectively.
import numpy as np

class gridworld:
    def __init__(self):
        self.terminal_state = [0,15]
        self.action = [0,1,2,3]
        self.value = np.zeros(16)
        self.reward = -1

    def next_state(self, s, a):
        if s in self.terminal_state:
            return s
        if a == 0:
            s = s - 4 if s >= 4 else s
        elif a == 1:
            s = s + 1 if (s+1)%4 != 0 else s
        elif a == 2:
            s = s + 4 if (s+4) < 16 else s
        elif a == 3:
            s = s - 1 if s % 4 != 0 else s
        return s

    def policy_evaluation(self):
        k = 0
        while True:
            delta = 0
            v_new = np.zeros(16)
            for s in range(16):
                v = 0
                if s in self.terminal_state:
                    v_new[s] = 0
                else:
                    v = self.value[s]
                    temp = 0
                    for a in range(4):
                        v_new[s] += 0.25*(-1 + self.value[self.next_state(s,a)])
                    delta = max(delta, abs(v-v_new[s]))
            self.value = v_new

            # greedy policy
            policy = np.zeros((16,4))
            for s in range(1,15):
                vmax = -30
                nmax = 0
                for a in range(4):
                    v = self.value[self.next_state(s,a)]
                    if v > vmax:
                        vmax = v
                        nmax = 1
                    elif vmax - v < 0.05:
                        nmax += 1
                for a in range(4):
                    v = self.value[self.next_state(s,a)]
                    if v == vmax:
                        policy[s,a] = 1/nmax

            k += 1
            if k == 1 or k==2 or k==3 or k==10 or k==131:
                print('The state value for %ith iteration:\n' %k, v_new.reshape((4,4)))
                print('The greedy policy for %ith iteration:\n' %k, policy)
            if delta < 0.001:
                return

a = gridworld()
a.policy_evaluation()
The state value for 1th iteration: [[ 0. -1. -1. -1.] [-1. -1. -1. -1.] [-1. -1. -1. -1.] [-1. -1. -1. 0.]] The greedy policy for 1th iteration: [[0. 0. 0. 0. ] [0. 0. 0. 1. ] [0.25 0.25 0.25 0.25] [0.25 0.25 0.25 0.25] [1. 0. 0. 0. ] [0.25 0.25 0.25 0.25] [0.25 0.25 0.25 0.25] [0.25 0.25 0.25 0.25] [0.25 0.25 0.25 0.25] [0.25 0.25 0.25 0.25] [0.25 0.25 0.25 0.25] [0. 0. 1. 0. ] [0.25 0.25 0.25 0.25] [0.25 0.25 0.25 0.25] [0. 1. 0. 0. ] [0. 0. 0. 0. ]] The state value for 2th iteration: [[ 0. -1.75 -2. -2. ] [-1.75 -2. -2. -2. ] [-2. -2. -2. -1.75] [-2. -2. -1.75 0. ]] The greedy policy for 2th iteration: [[0. 0. 0. 0. ] [0. 0. 0. 1. ] [0. 0. 0. 1. ] [0.25 0.25 0.25 0.25] [1. 0. 0. 0. ] [0.5 0. 0. 0.5 ] [0.25 0.25 0.25 0.25] [0. 0. 1. 0. ] [1. 0. 0. 0. ] [0.25 0.25 0.25 0.25] [0. 0.5 0.5 0. ] [0. 0. 1. 0. ] [0.25 0.25 0.25 0.25] [0. 1. 0. 0. ] [0. 1. 0. 0. ] [0. 0. 0. 0. ]] The state value for 3th iteration: [[ 0. -2.4375 -2.9375 -3. ] [-2.4375 -2.875 -3. -2.9375] [-2.9375 -3. -2.875 -2.4375] [-3. -2.9375 -2.4375 0. ]] The greedy policy for 3th iteration: [[0. 0. 0. 0. ] [0. 0. 0. 1. ] [0. 0. 0. 1. ] [0. 0. 0.5 0.5] [1. 0. 0. 0. ] [0.5 0. 0. 0.5] [0. 0. 0.5 0.5] [0. 0. 1. 0. ] [1. 0. 0. 0. ] [0.5 0.5 0. 0. ] [0. 0.5 0.5 0. ] [0. 0. 1. 0. ] [0.5 0.5 0. 0. ] [0. 1. 0. 0. ] [0. 1. 0. 0. ] [0. 0. 0. 0. ]] The state value for 10th iteration: [[ 0. -6.13796997 -8.35235596 -8.96731567] [-6.13796997 -7.73739624 -8.42782593 -8.35235596] [-8.35235596 -8.42782593 -7.73739624 -6.13796997] [-8.96731567 -8.35235596 -6.13796997 0. ]] The greedy policy for 10th iteration: [[0. 0. 0. 0. ] [0. 0. 0. 1. ] [0. 0. 0. 1. ] [0. 0. 0.5 0.5] [1. 0. 0. 0. ] [0.5 0. 0. 0.5] [0. 0. 0.5 0.5] [0. 0. 1. 0. ] [1. 0. 0. 0. ] [0.5 0.5 0. 0. ] [0. 0.5 0.5 0. ] [0. 0. 1. 0. ] [0.5 0.5 0. 0. ] [0. 1. 0. 0. ] [0. 1. 0. 0. ] [0. 0. 0. 0. ]] The state value for 131th iteration: [[ 0. -13.98945772 -19.98437823 -21.98251832] [-13.98945772 -17.98623815 -19.98448273 -19.98437823] [-19.98437823 -19.98448273 -17.98623815 -13.98945772] [-21.98251832 -19.98437823 -13.98945772 0. ]] The greedy policy for 131th iteration: [[0. 0. 0. 0. ] [0. 0. 0. 1. ] [0. 0. 0. 1. ] [0. 0. 0.5 0.5] [1. 0. 0. 0. ] [0.5 0. 0. 0. ] [0. 0. 0.5 0.5] [0. 0. 1. 0. ] [1. 0. 0. 0. ] [0.5 0.5 0. 0. ] [0. 0.5 0.5 0. ] [0. 0. 1. 0. ] [0.5 0.5 0. 0. ] [0. 1. 0. 0. ] [0. 1. 0. 0. ] [0. 0. 0. 0. ]]
MIT
example4.1/example4.1.ipynb
jerryzenghao/ReinformanceLearning
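For reference, the update implemented in `policy_evaluation` above is iterative policy evaluation for the equiprobable random policy, with a reward of −1 per step and no discounting. A sketch of the standard formula, matched to the code's `0.25*(-1 + value[next_state(s,a)])` term, is:
```latex
v_{k+1}(s) \;=\; \sum_{a} \pi(a \mid s)\,\bigl[\, r + v_k(s') \,\bigr]
\;=\; \frac{1}{4} \sum_{a=0}^{3} \bigl[ -1 + v_k\bigl(\mathrm{next}(s,a)\bigr) \bigr],
\qquad v_k(s) = 0 \ \text{ for terminal } s.
```
The sweep stops once the largest change over all states, `delta`, falls below the threshold 0.001.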
Lab 8
import pandas as pd
import matplotlib.pyplot as plt

from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import plot_confusion_matrix

%matplotlib inline

digits_X, digits_y = datasets.load_digits(return_X_y=True, as_frame=True)
digits = pd.concat([digits_X, digits_y], axis=1)
digits.head()
_____no_output_____
MIT
labs/lab08.ipynb
Flipom/mat281_portfolio
Exercise 1 (1 pt.)
Using all of the data, fit a logistic regression model to the digits data. Do not add an intercept and set a maximum of 400 iterations.

Obtain the _score_ and explain why the result is so good.
logistic = LogisticRegression(solver="lbfgs", max_iter=400, fit_intercept=False)
fit = logistic.fit(digits_X, digits_y)
print(f"El score del modelo de regresión logística es {fit.score(digits_X, digits_y)}")
El score del modelo de regresión logística es 1.0
MIT
labs/lab08.ipynb
Flipom/mat281_portfolio
__Answer:__ I suppose it is because we are evaluating on the original data themselves, and not on other data predicted from the originals.

Exercise 2 (1 pt.)
Using all of the data, what is the best choice of the parameter $k$ when fitting a kNN model to the digits data? Use values $k=2, ..., 10$.
for k in range(2, 11):
    kNN = KNeighborsClassifier(n_neighbors=k)
    fit = kNN.fit(digits_X, digits_y)
    print(f"El score del modelo de kNN con k={k} es {fit.score(digits_X, digits_y)}")
El score del modelo de kNN con k=2 es 0.9910962715637173
El score del modelo de kNN con k=3 es 0.993322203672788
El score del modelo de kNN con k=4 es 0.9922092376182526
El score del modelo de kNN con k=5 es 0.9905397885364496
El score del modelo de kNN con k=6 es 0.989983305509182
El score del modelo de kNN con k=7 es 0.9905397885364496
El score del modelo de kNN con k=8 es 0.9894268224819143
El score del modelo de kNN con k=9 es 0.9888703394546466
El score del modelo de kNN con k=10 es 0.9855314412910406
MIT
labs/lab08.ipynb
Flipom/mat281_portfolio
__Answer:__ The case k=3, because its score is the closest to 1.

Exercise 3 (1 pt.)
Plot the confusion matrix, normalized by prediction, for both models (logistic regression and kNN with the best choice of $k$). What conclusion can you draw?

Hint: Check the `normalize` argument of the confusion matrix.
plot_confusion_matrix(logistic, digits_X, digits_y, normalize='true');

best_knn = KNeighborsClassifier(n_neighbors=3)
B_kNN = best_knn.fit(digits_X, digits_y)
plot_confusion_matrix(B_kNN, digits_X, digits_y, normalize='true');
_____no_output_____
MIT
labs/lab08.ipynb
Flipom/mat281_portfolio
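The calls above normalize by the true label (`normalize='true'`, so rows sum to 1). Since the exercise statement asks for normalization by prediction, a variant using `normalize='pred'` (columns sum to 1) might look like the sketch below; `'true'`, `'pred'`, and `'all'` are the values accepted by scikit-learn's `plot_confusion_matrix`, and the snippet assumes the `logistic` and `B_kNN` estimators defined earlier in the notebook:
```python
# Sketch: confusion matrices normalized by predicted label (columns) instead of true label (rows)
plot_confusion_matrix(logistic, digits_X, digits_y, normalize='pred');
plot_confusion_matrix(B_kNN, digits_X, digits_y, normalize='pred');
```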
__Answer:__ The first matrix reflects better predictions than the second one, because it yields a diagonal matrix with, I assume, fewer errors compared with the off-diagonal values that are different from 0 in the second case.

Exercise 4 (1 pt.)
Pick a record where kNN made a mistake, _plot_ the image, and comment on the reasons why the algorithm may have gotten it wrong.
neigh_tt = KNeighborsClassifier(n_neighbors=5)
neigh_tt.fit(digits_X, digits_y)
_____no_output_____
MIT
labs/lab08.ipynb
Flipom/mat281_portfolio
The true value of the selected record is
i = 5
neigh_tt.predict(digits_X.iloc[[i], :])
_____no_output_____
MIT
labs/lab08.ipynb
Flipom/mat281_portfolio
While the prediction given by kNN is
neigh_tt.predict_proba(digits_X.iloc[[i], :])
_____no_output_____
MIT
labs/lab08.ipynb
Flipom/mat281_portfolio
Below is the image
plt.imshow(digits_X.loc[[i], :].to_numpy().reshape(8, 8), cmap=plt.cm.gray_r, interpolation='nearest');
_____no_output_____
MIT
labs/lab08.ipynb
Flipom/mat281_portfolio
**Spit some [tensor] flow**
We need to learn the intricacies of TensorFlow to master deep learning.
`Let's get this over with`
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import cv2

print(tf.__version__)

def evaluation_tf(report, y_test, y_pred, classes):
    plt.plot(report.history['loss'], label = 'training_loss')
    plt.plot(report.history['val_loss'], label = 'validation_loss')
    plt.legend()
    plt.show()

    plt.plot(report.history['accuracy'], label = 'training_accuracy')
    plt.plot(report.history['val_accuracy'], label = 'validation_accuracy')
    plt.legend()
    plt.show()

    from sklearn.metrics import confusion_matrix
    import itertools
    cm = confusion_matrix(y_test, y_pred)
    plt.figure(figsize=(10,10))
    plt.imshow(cm, cmap=plt.cm.Blues)
    for i,j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i,j], 'd'), horizontalalignment = 'center', color='black')
    plt.xlabel("Predicted labels")
    plt.ylabel("True labels")
    plt.xticks(range(0,classes))
    plt.yticks(range(0,classes))
    plt.title('Confusion matrix')
    plt.colorbar()
    plt.show()

# Taken from https://www.cs.toronto.edu/~kriz/cifar.html
labels = "airplane,automobile,bird,cat,deer,dog,frog,horse,ship,truck".split(",")
_____no_output_____
MIT
Tensorflow_2X_Notebooks/Demo26_CNN_CIFAR10_DataAugmentation.ipynb
mahnooranjum/Tensorflow_DeepLearning