Discriminator
#!L class Discriminator(nn.Module): def __init__(self, in_dim, hidden_dim=100): super(Discriminator, self).__init__() self.fc1 = nn.Linear(in_dim, hidden_dim) nn.init.xavier_normal_(self.fc1.weight) nn.init.constant_(self.fc1.bias, 0.0) self.fc2 = nn.Linear(hidden_dim, hidden_dim) nn.init.xavier_normal_(self.fc2.weight) nn.init.constant_(self.fc2.bias, 0.0) self.fc3 = nn.Linear(hidden_dim, hidden_dim) nn.init.xavier_normal_(self.fc3.weight) nn.init.constant_(self.fc3.bias, 0.0) self.fc4 = nn.Linear(hidden_dim, 1) nn.init.xavier_normal_(self.fc4.weight) nn.init.constant_(self.fc4.bias, 0.0) def forward(self, x): h1 = F.tanh(self.fc1(x)) h2 = F.leaky_relu(self.fc2(h1)) h3 = F.leaky_relu(self.fc3(h2)) score = torch.sigmoid(self.fc4(h3)) return score
_____no_output_____
MIT
homework03/homework03_part3_gan_basic.ipynb
VendettaPrime/Practical_DL
Define updates and losses
#!L generator = Generator(NOISE_DIM, out_dim = 2) discriminator = Discriminator(in_dim = 2) lr = 0.001 g_optimizer = optim.Adam(generator.parameters(), lr=lr, betas=(0.5, 0.999)) d_optimizer = optim.Adam(discriminator.parameters(), lr=lr, betas=(0.5, 0.999))
_____no_output_____
MIT
homework03/homework03_part3_gan_basic.ipynb
VendettaPrime/Practical_DL
Notice we are using the Adam optimizer with `beta1=0.5` for both the generator and the discriminator. This is a common practice and works well. Motivation: both models should stay flexible and adapt rapidly to each other's changing distributions. You can try different optimizers and parameters.
#!L ################################ # IMPLEMENT HERE # Define the g_loss and d_loss here # these are the only lines of code you need to change to implement GAN game def g_loss(): # if TASK == 1: # do something return # TODO def d_loss(): # if TASK == 1: # do something return # TODO ################################
_____no_output_____
MIT
homework03/homework03_part3_gan_basic.ipynb
VendettaPrime/Practical_DL
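For reference, one standard way to fill in the `g_loss` and `d_loss` placeholders above is the original non-saturating GAN objective built from log-losses on the sigmoid scores. The sketch below is only one possible answer to the exercise; it assumes the call signatures used in the training loop later in this notebook, i.e. `d_loss(discriminator(fake), discriminator(real))` and `g_loss(discriminator(fake))`:

```python
import torch

def g_loss(d_scores_fake):
    # Non-saturating generator loss: push D(G(z)) towards 1
    return -torch.mean(torch.log(d_scores_fake + 1e-8))

def d_loss(d_scores_fake, d_scores_real):
    # Discriminator loss: real scores towards 1, generated scores towards 0
    loss_real = -torch.mean(torch.log(d_scores_real + 1e-8))
    loss_fake = -torch.mean(torch.log(1.0 - d_scores_fake + 1e-8))
    return loss_real + loss_fake
```

The small epsilon inside the logarithms is only there to avoid `log(0)` when the discriminator saturates; it is not part of the mathematical objective.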
Get real data
#!L data = sample_true(100000) def iterate_minibatches(X, batchsize, y=None): perm = np.random.permutation(X.shape[0]) for start in range(0, X.shape[0], batchsize): end = min(start + batchsize, X.shape[0]) if y is None: yield X[perm[start:end]] else: yield X[perm[start:end]], y[perm[start:end]] #!L plt.rcParams['figure.figsize'] = (12, 12) vis_data(data) vis_g() vis_d()
_____no_output_____
MIT
homework03/homework03_part3_gan_basic.ipynb
VendettaPrime/Practical_DL
**Legend**:
- Blue dots are generated samples.
- The colored histogram in the background shows the density of the real data.
- The arrows show the gradients of the discriminator, i.e. the directions in which the discriminator pushes the generator's samples.

Train the model
#!L from IPython import display plt.xlim(lims) plt.ylim(lims) num_epochs = 100 batch_size = 64 # =========================== # IMPORTANT PARAMETER: # Number of D updates per G update # =========================== k_d, k_g = 4, 1 accs = [] try: for epoch in range(num_epochs): for input_data in iterate_minibatches(data, batch_size): # Optimize D for _ in range(k_d): # Sample noise noise = Variable(torch.Tensor(sample_noise(len(input_data)))) # Do an update inp_data = Variable(torch.Tensor(input_data)) data_gen = generator(noise) loss = d_loss(discriminator(data_gen), discriminator(inp_data)) d_optimizer.zero_grad() loss.backward() d_optimizer.step() # Optimize G for _ in range(k_g): # Sample noise noise = Variable(torch.Tensor(sample_noise(len(input_data)))) # Do an update data_gen = generator(noise) loss = g_loss(discriminator(data_gen)) g_optimizer.zero_grad() loss.backward() g_optimizer.step() # Visualize plt.clf() vis_data(data); vis_g(); vis_d() display.clear_output(wait=True) display.display(plt.gcf()) except KeyboardInterrupt: pass
_____no_output_____
MIT
homework03/homework03_part3_gan_basic.ipynb
VendettaPrime/Practical_DL
> **Copyright (c) 2020 Skymind Holdings Berhad**
> **Copyright (c) 2021 Skymind Education Group Sdn. Bhd.**

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

**SPDX-License-Identifier: Apache-2.0**

INSTRUCTION: Follow the steps in the commented lines for each section and run the code.
""" install torch(PyTorch) and transformers to install them type in your terminal: pip install torch pip install transformers """ # import the necessary library from transformers import pipeline # write your context (where model seeks the answer for the question) context = """ You can add your own context here. Try to write something or copy from other source. """ # write your own question question = "" # initialize your model """ This is a pretrained model that we can get from huggingface There are more models that we can find there: https://huggingface.co/ Go to this web page and import a model and a tokenizer by putting the model and tokenizer into the parameters """ # uncomment this code below # question_answering = pipeline('question-answering', model= , tokenizer=) # test the model (uncomment the code below) # result = question_answering(question=question, context=context) # print("Answer:", result['answer']) # print("Score:", result['score'])
_____no_output_____
Apache-2.0
nlp-labs/Day_09/QnA_Model/QnA_Handson.ipynb
skymind-talent/nlp-traininglabs
1. Import libraries
#----------------------------Reproducible---------------------------------------------------------------------------------------- import numpy as np import tensorflow as tf import random as rn import os seed=0 os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) rn.seed(seed) #session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) session_conf =tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) from keras import backend as K #tf.set_random_seed(seed) tf.compat.v1.set_random_seed(seed) #sess = tf.Session(graph=tf.get_default_graph(), config=session_conf) sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf) K.set_session(sess) #----------------------------Reproducible---------------------------------------------------------------------------------------- os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' #-------------------------------------------------------------------------------------------------------------------------------- import matplotlib import matplotlib.pyplot as plt import matplotlib.cm as cm %matplotlib inline matplotlib.style.use('ggplot') import random import scipy.sparse as sparse import scipy.io from keras.utils import to_categorical from sklearn.ensemble import ExtraTreesClassifier from sklearn.model_selection import cross_val_score from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import StandardScaler from skfeature.function.similarity_based import lap_score from skfeature.utility import construct_W from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.linear_model import LinearRegression import time import pandas as pd def mse_check(train, val): LR = LinearRegression(n_jobs = -1) LR.fit(train[0], train[1]) MSELR = ((LR.predict(val[0]) - val[1]) ** 2).mean() return MSELR def next_batch(samples, labels, num): # Return a total of `num` random samples and labels. 
idx = np.random.choice(len(samples), num) return samples[idx], labels[idx] def standard_single_hidden_layer_autoencoder(X, units, O): reg_alpha = 1e-3 D = X.shape[1] weights = tf.get_variable("weights", [D, units]) biases = tf.get_variable("biases", [units]) X = tf.matmul(X, weights) + biases X = tf.layers.dense(X, O, kernel_regularizer = tf.contrib.layers.l2_regularizer(reg_alpha)) return X, weights def aefs_subset_selector(train, K, epoch_num=1000, alpha=0.1): D = train[0].shape[1] O = train[1].shape[1] learning_rate = 0.001 tf.reset_default_graph() X = tf.placeholder(tf.float32, (None, D)) TY = tf.placeholder(tf.float32, (None, O)) Y, weights = standard_single_hidden_layer_autoencoder(X, K, O) loss = tf.reduce_mean(tf.square(TY - Y)) + alpha * tf.reduce_sum(tf.sqrt(tf.reduce_sum(tf.square(weights), axis=1)), axis=0) + tf.losses.get_total_loss() train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss) init = tf.global_variables_initializer() batch_size = 8 batch_per_epoch = train[0].shape[0] // batch_size costs = [] session_config = tf.ConfigProto() session_config.gpu_options.allow_growth = False with tf.Session(config = session_config) as sess: sess.run(init) for ep in range(epoch_num): cost = 0 for batch_n in range(batch_per_epoch): imgs, yimgs = next_batch(train[0], train[1], batch_size) _, c, p = sess.run([train_op, loss, weights], feed_dict = {X: imgs, TY: yimgs}) cost += c / batch_per_epoch costs.append(cost) return list(np.argmax(np.abs(p), axis=0)), costs def AEFS(train, test, K, debug = True): x_train, x_val, y_train, y_val = train_test_split(train[0], train[1], test_size = 0.1) print("y_train.shape",y_train.shape) bindices = [] bmse = 1e100 for alpha in [1e-3, 1e-1, 1e1, 1e3]: print("alpha",alpha) indices, _ = aefs_subset_selector(train, K) mse = mse_check((train[0][:, indices], train[1]), (x_val[:, indices], y_val)) if bmse > mse: bmse = mse bindices = indices if debug: print(bindices, bmse) return train[0][:, bindices], test[0][:, bindices] #-------------------------------------------------------------------------------------------------------------------------------- def ETree(p_train_feature,p_train_label,p_test_feature,p_test_label,p_seed): clf = ExtraTreesClassifier(n_estimators=50, random_state=p_seed) # Training clf.fit(p_train_feature, p_train_label) # Training accuracy print('Training accuracy:',clf.score(p_train_feature, np.array(p_train_label))) print('Training accuracy:',accuracy_score(np.array(p_train_label),clf.predict(p_train_feature))) #print('Training accuracy:',np.sum(clf.predict(p_train_feature)==np.array(p_train_label))/p_train_label.shape[0]) # Testing accuracy print('Testing accuracy:',clf.score(p_test_feature, np.array(p_test_label))) print('Testing accuracy:',accuracy_score(np.array(p_test_label),clf.predict(p_test_feature))) #print('Testing accuracy:',np.sum(clf.predict(p_test_feature)==np.array(p_test_label))/p_test_label.shape[0]) #-------------------------------------------------------------------------------------------------------------------------------- def write_to_csv(p_data,p_path): dataframe = pd.DataFrame(p_data) dataframe.to_csv(p_path, mode='a',header=False,index=False,sep=',')
_____no_output_____
MIT
Python/AbsoluteAndOtherAlgorithms/8ProstateGE/AEFS_64.ipynb
xinxingwu-uk/UFS
2. Loading data
data_path="./Dataset/Prostate_GE.mat" Data = scipy.io.loadmat(data_path) data_arr=Data['X'] label_arr=Data['Y'][:, 0]-1 Data=MinMaxScaler(feature_range=(0,1)).fit_transform(data_arr) C_train_x,C_test_x,C_train_y,C_test_y= train_test_split(Data,label_arr,test_size=0.2,random_state=seed) print('Shape of C_train_x: ' + str(C_train_x.shape)) print('Shape of C_train_y: ' + str(C_train_y.shape)) print('Shape of C_test_x: ' + str(C_test_x.shape)) print('Shape of C_test_y: ' + str(C_test_y.shape)) key_feture_number=64
_____no_output_____
MIT
Python/AbsoluteAndOtherAlgorithms/8ProstateGE/AEFS_64.ipynb
xinxingwu-uk/UFS
3. Model
train=(C_train_x,C_train_x) test=(C_test_x,C_test_x) start = time.clock() C_train_selected_x, C_test_selected_x = AEFS((train[0], train[0]), (test[0], test[0]), key_feture_number) time_cost=time.clock() - start write_to_csv(np.array([time_cost]),"./log/AEFS_time"+str(key_feture_number)+".csv")
y_train.shape (72, 5966) alpha 0.001 WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/tensorflow/python/ops/init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version. Instructions for updating: Call initializer instance with the dtype argument instead of passing it to the constructor
MIT
Python/AbsoluteAndOtherAlgorithms/8ProstateGE/AEFS_64.ipynb
xinxingwu-uk/UFS
4. Classifying Extra Trees
train_feature=C_train_x train_label=C_train_y test_feature=C_test_x test_label=C_test_y print('Shape of train_feature: ' + str(train_feature.shape)) print('Shape of train_label: ' + str(train_label.shape)) print('Shape of test_feature: ' + str(test_feature.shape)) print('Shape of test_label: ' + str(test_label.shape)) p_seed=seed ETree(train_feature,train_label,test_feature,test_label,p_seed) train_feature=C_train_selected_x train_label=C_train_y test_feature=C_test_selected_x test_label=C_test_y print('Shape of train_feature: ' + str(train_feature.shape)) print('Shape of train_label: ' + str(train_label.shape)) print('Shape of test_feature: ' + str(test_feature.shape)) print('Shape of test_label: ' + str(test_label.shape)) p_seed=seed ETree(train_feature,train_label,test_feature,test_label,p_seed)
Shape of train_feature: (81, 64) Shape of train_label: (81,) Shape of test_feature: (21, 64) Shape of test_label: (21,) Training accuracy: 1.0 Training accuracy: 1.0 Testing accuracy: 0.8571428571428571 Testing accuracy: 0.8571428571428571
MIT
Python/AbsoluteAndOtherAlgorithms/8ProstateGE/AEFS_64.ipynb
xinxingwu-uk/UFS
6. Reconstruction loss
from sklearn.linear_model import LinearRegression def mse_check(train, test): LR = LinearRegression(n_jobs = -1) LR.fit(train[0], train[1]) MSELR = ((LR.predict(test[0]) - test[1]) ** 2).mean() return MSELR train_feature_tuple=(C_train_selected_x,C_train_x) test_feature_tuple=(C_test_selected_x,C_test_x) reconstruction_loss=mse_check(train_feature_tuple, test_feature_tuple) print(reconstruction_loss)
0.27951798104721787
MIT
Python/AbsoluteAndOtherAlgorithms/8ProstateGE/AEFS_64.ipynb
xinxingwu-uk/UFS
Deprecated - Connecting brain regions through BAMS information

This script connects brain regions through BAMS connectivity information. However, at this level the connectivity information has no reference to the original source, and that is not OK. Therefore do **not** use this.
### DEPRECATED import pandas as pd import re import itertools from difflib import SequenceMatcher root = "Data/csvs/basal_ganglia/regions" sim_csv_loc = "/region_similarity.csv" def similar(a, b): return SequenceMatcher(None, a, b).ratio() ## Prepare regions and regions_other csvs df_all_regions = pd.read_csv(root + "/all_regions.csv", dtype="object") df = pd.DataFrame(columns = ["ID1", "Region_name_1", "ID2", "Region_name_2", "Sim"]) # Put region names and ID into tuple list subset = df_all_regions[["ID", "Region_name"]] region_name_tuples = [tuple(x) for x in subset.to_numpy()] # Find all combinations of region_names and look at similarity in name for a, b in itertools.combinations(region_name_tuples, 2): id1, reg1 = a id2, reg2 = b sim_score = similar(reg1, reg2) if(sim_score > 0.7): a_row = pd.Series([id1, reg1, id2, reg2, sim_score], index = ["ID1", "Region_name_1", "ID2", "Region_name_2", "Sim"]) df = df.append(a_row, ignore_index=True) # Store similarities df_sorted = df.sort_values('Sim') df_sorted.to_csv(root + sim_csv_loc, encoding='utf-8') print("Similarities stored in", sim_csv_loc) def get_count_of_type(label, session): q = "MATCH (n:%s) RETURN count(n)" % label res = session.run(q) print("Added", res.value()[0], "nodes of type", label) def get_count_of_relationship(label, session): q = "MATCH ()-[r:%s]-() RETURN count(*)" %label res = session.run(q) print("Added", res.value()[0], "relationships of type", label) def get_csv_path(csv_file): path_all_csv = os.path.realpath("Data/csvs/basal_ganglia/regions") return os.path.join(path_all_csv, csv_file).replace("\\","/") ## Then find the regions that correspond to each other and stor that in a new CSV file # Add relation to all areas that define positions positioning = ["caudal", "rostral", "ventral", "dorsal"] area_describing = ["internal", "compact", "core", "shell"] df_sims = pd.read_csv(root + sim_csv_loc, converters = {"Sims": float}) # ALl with score above 0.95 are the same # Also the same: Substantia innominata, basal",103,"Substantia innominata, basal part" 0.91 df_equals = df_sims.loc[df_sims['Sim'] > 0.95] df_sorted.to_csv(root + "/regions_equal.csv", encoding='utf-8') from neo4j import GraphDatabase, basic_auth from dotenv import load_dotenv import os load_dotenv() neo4jUser = os.getenv("NEO4J_USER") neo4jPwd = os.getenv("NEO4J_PASSWORD") driver = GraphDatabase.driver("bolt://localhost:7687",auth=basic_auth(neo4jUser, neo4jPwd)) # Relationship EQUALS between equal BrainRegion nodes csv_file_path = "file:///%s" % get_csv_path("regions_equal.csv") query=""" LOAD CSV WITH HEADERS FROM "%s" AS row MATCH (a:BrainRegion { id: row.ID1}) MATCH (c:BrainRegion { id: row.ID2 }) MERGE (a)-[:EQUALS]->(c) """ % csv_file_path with driver.session() as session: session.run(query) get_count_of_relationship("EQUALS", session) ## TODO add rel for belongs-to/part of
Added 6124 relationships of type EQUALS
CC-BY-4.0
1. Extending the dataset with data from other sources/X - Deprecated - Connecting the regions through BAMS information.ipynb
marenpg/jupyter_basal_ganglia
Tile Coding

---

Tile coding is an innovative way of discretizing a continuous space that enables better generalization compared to a single grid-based approach. The fundamental idea is to create several overlapping grids, or _tilings_; then for any given sample value, you need only check which tiles it lies in. You can then encode the original continuous value by a vector of integer indices or bits that identifies each activated tile.

1. Import the Necessary Packages
# Import common libraries import sys import gym import numpy as np import matplotlib.pyplot as plt # Set plotting options %matplotlib inline plt.style.use('ggplot') np.set_printoptions(precision=3, linewidth=120)
_____no_output_____
MIT
tile-coding/Tile_Coding.ipynb
kw90/deep-reinforcement-learning
2. Specify the Environment, and Explore the State and Action Spaces

We'll use [OpenAI Gym](https://gym.openai.com/) environments to test and develop our algorithms. These simulate a variety of classic as well as contemporary reinforcement learning tasks. Let's begin with an environment that has a continuous state space, but a discrete action space.
# Create an environment env = gym.make('Acrobot-v1') env.seed(505); # Explore state (observation) space print("State space:", env.observation_space) print("- low:", env.observation_space.low) print("- high:", env.observation_space.high) # Explore action space print("Action space:", env.action_space)
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype. State space: Box(6,) - low: [ -1. -1. -1. -1. -12.566 -28.274] - high: [ 1. 1. 1. 1. 12.566 28.274] Action space: Discrete(3)
MIT
tile-coding/Tile_Coding.ipynb
kw90/deep-reinforcement-learning
Note that the state space is multi-dimensional, with most dimensions ranging from -1 to 1 (positions of the two joints), while the final two dimensions have a larger range. How do we discretize such a space using tiles?

3. Tiling

Let's first design a way to create a single tiling for a given state space. This is very similar to a uniform grid! The only difference is that you should include an offset for each dimension that shifts the split points.

For instance, if `low = [-1.0, -5.0]`, `high = [1.0, 5.0]`, `bins = (10, 10)`, and `offsets = (-0.1, 0.5)`, then return a list of 2 NumPy arrays (2 dimensions), each containing the following split points (9 split points per dimension):

```
[array([-0.9, -0.7, -0.5, -0.3, -0.1,  0.1,  0.3,  0.5,  0.7]),
 array([-3.5, -2.5, -1.5, -0.5,  0.5,  1.5,  2.5,  3.5,  4.5])]
```

Notice how the split points for the first dimension are offset by `-0.1`, and for the second dimension are offset by `+0.5`. This might mean that some of our tiles, especially along the perimeter, are partially outside the valid state space, but that is unavoidable and harmless.
def float_range(start: float, stop: float, step_size: float): count: int = 0 while True: temp = start + count * step_size if step_size > 0 and temp >= stop: break if step_size < 0 and temp <= stop: break yield temp count += 1 def create_tiling_grid(low, high, bins=(10, 10), offsets=(0.0, 0.0)): """Define a uniformly-spaced grid that can be used for tile-coding a space. Parameters ---------- low : array_like Lower bounds for each dimension of the continuous space. high : array_like Upper bounds for each dimension of the continuous space. bins : tuple Number of bins or tiles along each corresponding dimension. offsets : tuple Split points for each dimension should be offset by these values. Returns ------- grid : list of array_like A list of arrays containing split points for each dimension. """ tiling_grid_d = [] for d in range(0, len(bins)): low_bound_d = low[d] high_bound_d = high[d] range_d = abs(high_bound_d - low_bound_d) step_size_d = range_d / bins[d] offset_d = offsets[d] raw_tiling_grid_d = [x for x in \ float_range(low_bound_d + step_size_d + offset_d, \ high_bound_d, step_size_d)] tiling_grid_d.append(raw_tiling_grid_d[:(bins[d]-1)]) return tiling_grid_d low = [-1.0, -5.0] high = [1.0, 5.0] create_tiling_grid(low, high, bins=(10, 10), offsets=(-0.1, 0.5)) # [test]
_____no_output_____
MIT
tile-coding/Tile_Coding.ipynb
kw90/deep-reinforcement-learning
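As an alternative sketch (not part of the original exercise), the same split points can be produced more compactly by letting `np.linspace` generate the interior bin edges and adding the offsets afterwards; it should reproduce the example output from the text above up to floating-point noise:

```python
import numpy as np

def create_tiling_grid_linspace(low, high, bins=(10, 10), offsets=(0.0, 0.0)):
    """Same contract as create_tiling_grid above, using np.linspace per dimension."""
    # linspace(low, high, bins+1) gives bins+1 edges; [1:-1] keeps the bins-1 interior split points
    return [np.linspace(low[d], high[d], bins[d] + 1)[1:-1] + offsets[d]
            for d in range(len(bins))]

create_tiling_grid_linspace([-1.0, -5.0], [1.0, 5.0], bins=(10, 10), offsets=(-0.1, 0.5))
```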
You can now use this function to define a set of tilings that are a little offset from each other.
def create_tilings(low, high, tiling_specs): """Define multiple tilings using the provided specifications. Parameters ---------- low : array_like Lower bounds for each dimension of the continuous space. high : array_like Upper bounds for each dimension of the continuous space. tiling_specs : list of tuples A sequence of (bins, offsets) to be passed to create_tiling_grid(). Returns ------- tilings : list A list of tilings (grids), each produced by create_tiling_grid(). """ return [create_tiling_grid(low, high, bins, offset) for bins, offset in tiling_specs] # Tiling specs: [(<bins>, <offsets>), ...] tiling_specs = [((10, 10), (-0.066, -0.33)), ((10, 10), (0.0, 0.0)), ((10, 10), (0.066, 0.33))] tilings = create_tilings(low, high, tiling_specs)
_____no_output_____
MIT
tile-coding/Tile_Coding.ipynb
kw90/deep-reinforcement-learning
It may be hard to gauge whether you are getting desired results or not. So let's try to visualize these tilings.
from matplotlib.lines import Line2D def visualize_tilings(tilings): """Plot each tiling as a grid.""" prop_cycle = plt.rcParams['axes.prop_cycle'] colors = prop_cycle.by_key()['color'] linestyles = ['-', '--', ':'] legend_lines = [] fig, ax = plt.subplots(figsize=(10, 10)) for i, grid in enumerate(tilings): for x in grid[0]: l = ax.axvline(x=x, color=colors[i % len(colors)], linestyle=linestyles[i % len(linestyles)], label=i) for y in grid[1]: l = ax.axhline(y=y, color=colors[i % len(colors)], linestyle=linestyles[i % len(linestyles)]) legend_lines.append(l) ax.grid('off') ax.legend(legend_lines, ["Tiling #{}".format(t) for t in range(len(legend_lines))], facecolor='white', framealpha=0.9) ax.set_title("Tilings") return ax # return Axis object to draw on later, if needed visualize_tilings(tilings);
_____no_output_____
MIT
tile-coding/Tile_Coding.ipynb
kw90/deep-reinforcement-learning
Great! Now that we have a way to generate these tilings, we can next write our encoding function that will convert any given continuous state value to a discrete vector.

4. Tile Encoding

Implement the following to produce a vector that contains the indices for each tile that the input state value belongs to. The shape of the vector can be the same as the arrangement of tiles you have, or it can be ultimately flattened for convenience.

You can use the same `discretize()` function here from grid-based discretization, and simply call it for each tiling.
def discretize(sample, grid): """Discretize a sample as per given grid. Parameters ---------- sample : array_like A single sample from the (original) continuous space. grid : list of array_like A list of arrays containing split points for each dimension. Returns ------- discretized_sample : array_like A sequence of integers with the same number of dimensions as sample. """ digitized_d = () for dimension in range(0, len(sample)): digitized_d = digitized_d + (int(np.digitize(sample[dimension], grid[dimension], right=False)),) return digitized_d def tile_encode(sample, tilings, flatten=False): """Encode given sample using tile-coding. Parameters ---------- sample : array_like A single sample from the (original) continuous space. tilings : list A list of tilings (grids), each produced by create_tiling_grid(). flatten : bool If true, flatten the resulting binary arrays into a single long vector. Returns ------- encoded_sample : list or array_like A list of binary vectors, one for each tiling, or flattened into one. """ encoded_tiles = [discretize(sample, tiling) for tiling in tilings] if flatten: return np.concatenate(encoded_tiles) else: return encoded_tiles # Test with some sample values samples = [(-1.2 , -5.1 ), (-0.75, 3.25), (-0.5 , 0.0 ), ( 0.25, -1.9 ), ( 0.15, -1.75), ( 0.75, 2.5 ), ( 0.7 , -3.7 ), ( 1.0 , 5.0 )] encoded_samples = [tile_encode(sample, tilings) for sample in samples] print("\nSamples:", repr(samples), sep="\n") print("\nEncoded samples:", repr(encoded_samples), sep="\n")
Samples: [(-1.2, -5.1), (-0.75, 3.25), (-0.5, 0.0), (0.25, -1.9), (0.15, -1.75), (0.75, 2.5), (0.7, -3.7), (1.0, 5.0)] Encoded samples: [[(0, 0), (0, 0), (0, 0)], [(1, 8), (1, 8), (0, 7)], [(2, 5), (2, 5), (2, 4)], [(6, 3), (6, 3), (5, 2)], [(6, 3), (5, 3), (5, 2)], [(9, 7), (8, 7), (8, 7)], [(8, 1), (8, 1), (8, 0)], [(9, 9), (9, 9), (9, 9)]]
MIT
tile-coding/Tile_Coding.ipynb
kw90/deep-reinforcement-learning
Note that we did not flatten the encoding above, which is why each sample's representation is a pair of indices for each tiling. This makes it easy to visualize it using the tilings.
from matplotlib.patches import Rectangle def visualize_encoded_samples(samples, encoded_samples, tilings, low=None, high=None): """Visualize samples by activating the respective tiles.""" samples = np.array(samples) # for ease of indexing # Show tiling grids ax = visualize_tilings(tilings) # If bounds (low, high) are specified, use them to set axis limits if low is not None and high is not None: ax.set_xlim(low[0], high[0]) ax.set_ylim(low[1], high[1]) else: # Pre-render (invisible) samples to automatically set reasonable axis limits, and use them as (low, high) ax.plot(samples[:, 0], samples[:, 1], 'o', alpha=0.0) low = [ax.get_xlim()[0], ax.get_ylim()[0]] high = [ax.get_xlim()[1], ax.get_ylim()[1]] # Map each encoded sample (which is really a list of indices) to the corresponding tiles it belongs to tilings_extended = [np.hstack((np.array([low]).T, grid, np.array([high]).T)) for grid in tilings] # add low and high ends tile_centers = [(grid_extended[:, 1:] + grid_extended[:, :-1]) / 2 for grid_extended in tilings_extended] # compute center of each tile tile_toplefts = [grid_extended[:, :-1] for grid_extended in tilings_extended] # compute topleft of each tile tile_bottomrights = [grid_extended[:, 1:] for grid_extended in tilings_extended] # compute bottomright of each tile prop_cycle = plt.rcParams['axes.prop_cycle'] colors = prop_cycle.by_key()['color'] for sample, encoded_sample in zip(samples, encoded_samples): for i, tile in enumerate(encoded_sample): # Shade the entire tile with a rectangle topleft = tile_toplefts[i][0][tile[0]], tile_toplefts[i][1][tile[1]] bottomright = tile_bottomrights[i][0][tile[0]], tile_bottomrights[i][1][tile[1]] ax.add_patch(Rectangle(topleft, bottomright[0] - topleft[0], bottomright[1] - topleft[1], color=colors[i], alpha=0.33)) # In case sample is outside tile bounds, it may not have been highlighted properly if any(sample < topleft) or any(sample > bottomright): # So plot a point in the center of the tile and draw a connecting line cx, cy = tile_centers[i][0][tile[0]], tile_centers[i][1][tile[1]] ax.add_line(Line2D([sample[0], cx], [sample[1], cy], color=colors[i])) ax.plot(cx, cy, 's', color=colors[i]) # Finally, plot original samples ax.plot(samples[:, 0], samples[:, 1], 'o', color='r') ax.margins(x=0, y=0) # remove unnecessary margins ax.set_title("Tile-encoded samples") return ax visualize_encoded_samples(samples, encoded_samples, tilings);
_____no_output_____
MIT
tile-coding/Tile_Coding.ipynb
kw90/deep-reinforcement-learning
Inspect the results and make sure you understand how the corresponding tiles are being chosen. Note that some samples may have one or more tiles in common.

5. Q-Table with Tile Coding

The next step is to design a special Q-table that is able to utilize this tile coding scheme. It should have the same kind of interface as a regular table, i.e. given a `<state, action>` pair, it should return a `<value>`. Similarly, it should also allow you to update the `<value>` for a given `<state, action>` pair (note that this should update all the tiles that `<state>` belongs to).

The `<state>` supplied here is assumed to be from the original continuous state space, and `<action>` is discrete (an integer index). The Q-table should internally convert the `<state>` to its tile-coded representation when required.
class QTable: """Simple Q-table.""" def __init__(self, state_size, action_size): """Initialize Q-table. Parameters ---------- state_size : tuple Number of discrete values along each dimension of state space. action_size : int Number of discrete actions in action space. """ self.state_size = state_size self.action_size = action_size self.q_table = np.zeros(shape=(self.state_size + (self.action_size,))) # TODO: Create Q-table, initialize all Q-values to zero # Note: If state_size = (9, 9), action_size = 2, q_table.shape should be (9, 9, 2) print("QTable(): size =", self.q_table.shape) class TiledQTable: """Composite Q-table with an internal tile coding scheme.""" def __init__(self, low, high, tiling_specs, action_size): """Create tilings and initialize internal Q-table(s). Parameters ---------- low : array_like Lower bounds for each dimension of state space. high : array_like Upper bounds for each dimension of state space. tiling_specs : list of tuples A sequence of (bins, offsets) to be passed to create_tilings() along with low, high. action_size : int Number of discrete actions in action space. """ self.tilings = create_tilings(low, high, tiling_specs) self.state_sizes = [tuple(len(splits)+1 for splits in tiling_grid) for tiling_grid in self.tilings] self.action_size = action_size self.q_tables = [QTable(state_size, self.action_size) for state_size in self.state_sizes] print("TiledQTable(): no. of internal tables = ", len(self.q_tables)) def get(self, state, action): """Get Q-value for given <state, action> pair. Parameters ---------- state : array_like Vector representing the state in the original continuous space. action : int Index of desired action. Returns ------- value : float Q-value of given <state, action> pair, averaged from all internal Q-tables. """ # TODO: Encode state to get tile indices state_encoding = tile_encode(state, self.tilings) # TODO: Retrieve q-value for each tiling, and return their average action_value: float = 0.0 for i, tile_q_table in enumerate(self.q_tables): action_value += tile_q_table.q_table[tuple(state_encoding[i] + (action,))] return action_value / len(self.q_tables) def update(self, state, action, value, alpha=0.1): """Soft-update Q-value for given <state, action> pair to value. Instead of overwriting Q(state, action) with value, perform soft-update: Q(state, action) = alpha * value + (1.0 - alpha) * Q(state, action) Parameters ---------- state : array_like Vector representing the state in the original continuous space. action : int Index of desired action. value : float Desired Q-value for <state, action> pair. alpha : float Update factor to perform soft-update, in [0.0, 1.0] range. 
""" # TODO: Encode state to get tile indices state_encoding = tile_encode(state, self.tilings) # TODO: Update q-value for each tiling by update factor alpha for i, tile_q_table in enumerate(self.q_tables): q_table_value = tile_q_table.q_table[tuple(state_encoding[i] + (action,))] new_value = alpha * value + (1.0 - alpha) * q_table_value tile_q_table.q_table[tuple(state_encoding[i] + (action,))] = new_value # Test with a sample Q-table tq = TiledQTable(low, high, tiling_specs, 2) s1 = 3; s2 = 4; a = 0; q = 1.0 print("[GET] Q({}, {}) = {}".format(samples[s1], a, tq.get(samples[s1], a))) # check value at sample = s1, action = a print("[UPDATE] Q({}, {}) = {}".format(samples[s2], a, q)); tq.update(samples[s2], a, q) # update value for sample with some common tile(s) print("[GET] Q({}, {}) = {}".format(samples[s1], a, tq.get(samples[s1], a))) # check value again, should be slightly updated
QTable(): size = (10, 10, 2) QTable(): size = (10, 10, 2) QTable(): size = (10, 10, 2) TiledQTable(): no. of internal tables = 3 [GET] Q((0.25, -1.9), 0) = 0.0 [UPDATE] Q((0.15, -1.75), 0) = 1.0 [GET] Q((0.25, -1.9), 0) = 0.06666666666666667
MIT
tile-coding/Tile_Coding.ipynb
kw90/deep-reinforcement-learning
If you update the q-value for a particular state (say, `(0.25, -1.9)`) and action (say, `0`), then you should notice that the q-value of a nearby state (e.g. `(0.15, -1.75)` and the same action) has changed as well! This is how tile coding is able to generalize values across the state space better than a single uniform grid.

6. Implement a Q-Learning Agent using Tile Coding

Now it's your turn to apply this discretization technique to design and test a complete learning agent!
class QLearningAgentTileCoding: """Q-Learning agent that can act on a continuous state space by discretizing it.""" def __init__(self, env, tiled_q_table, alpha=0.02, gamma=0.99, epsilon=1.0, epsilon_decay_rate=0.9995, min_epsilon=.01, seed=123): """Initialize variables, create grid for discretization.""" # Environment info self.env = env self.state_size = tiled_q_tables.state_sizes self.action_size = self.env.action_space.n # 1-dimensional discrete action space self.seed = np.random.seed(seed) print("Environment:", self.env) print("State space size:", self.state_size) print("Action space size:", self.action_size) # Learning parameters self.alpha = alpha # learning rate self.gamma = gamma # discount factor self.epsilon = self.initial_epsilon = epsilon # initial exploration rate self.epsilon_decay_rate = epsilon_decay_rate # how quickly should we decrease epsilon self.min_epsilon = min_epsilon # Create Q-table self.tiled_q_table = tiled_q_table def reset_episode(self, state): """Reset variables for a new episode.""" # Gradually decrease exploration rate self.epsilon *= self.epsilon_decay_rate self.epsilon = max(self.epsilon, self.min_epsilon) # Decide initial action self.last_state = state Q_state = [self.tiled_q_table.get(state, action) for action in range(self.action_size)] self.last_action = np.argmax(Q_state) return self.last_action def reset_exploration(self, epsilon=None): """Reset exploration rate used when training.""" self.epsilon = epsilon if epsilon is not None else self.initial_epsilon def act(self, state, reward=None, done=None, mode='train'): """Pick next action and update internal Q table (when mode != 'test').""" Q_state = [self.tiled_q_table.get(state, action) for action in range(self.action_size)] if mode == 'test': # Test mode: Simply produce an action action = np.argmax(Q_state) else: # Train mode (default): Update Q table, pick next action # Note: We update the Q table entry for the *last* (state, action) pair with current state, reward action_value = reward + self.gamma * max(Q_state) self.tiled_q_table.update(self.last_state, self.last_action, action_value, self.alpha) # Exploration vs. 
exploitation do_exploration = np.random.uniform(0, 1) < self.epsilon if do_exploration: # Pick a random action action = np.random.randint(0, self.action_size) else: # Pick the best action from Q table action = np.argmax(Q_state) # Roll over current state, action for next step self.last_state = state self.last_action = action return action n_bins = 10 obs_space = env.observation_space n_actions = env.action_space.n obs_space_shape = env.observation_space.shape[0] bins = tuple([n_bins]*obs_space_shape) offset_positions = (obs_space.high - obs_space.low)/(3*n_bins) tiling_specifications = [(bins, -offset_positions), (bins, tuple([0.0] * obs_space_shape)), (bins, +offset_positions)] tiled_q_tables = TiledQTable(obs_space.low, obs_space.high, tiling_specifications, n_actions) agent = QLearningAgentTileCoding(env=env, tiled_q_table=tiled_q_tables) print(f'''Observation Space Shape: {obs_space_shape}''') print(f'''Bins: {bins}''') print(f'''Offsets: {offset_positions}''') print(f'''Tilings: {tiling_specifications}''') def run(agent, env, num_episodes=10000, mode='train'): """Run agent in given reinforcement learning environment and return scores.""" scores = [] max_avg_score = -np.inf for i_episode in range(1, num_episodes+1): # Initialize episode state = env.reset() action = agent.reset_episode(state) total_reward = 0 done = False # Roll out steps until done while not done: state, reward, done, info = env.step(action) total_reward += reward action = agent.act(state, reward, done, mode) # Save final score scores.append(total_reward) # Print episode stats if mode == 'train': if len(scores) > 100: avg_score = np.mean(scores[-100:]) if avg_score > max_avg_score: max_avg_score = avg_score if i_episode % 100 == 0: print("\rEpisode {}/{} | Max Average Score: {}".format(i_episode, num_episodes, max_avg_score), end="") sys.stdout.flush() return scores scores = run(agent, env) import pandas as pd def plot_scores(scores, rolling_window=100): """Plot scores and optional rolling mean using specified window.""" plt.plot(scores); plt.title("Scores"); rolling_mean = pd.Series(scores).rolling(rolling_window).mean() plt.plot(rolling_mean); return rolling_mean rolling_mean = plot_scores(scores)
_____no_output_____
MIT
tile-coding/Tile_Coding.ipynb
kw90/deep-reinforcement-learning
Importing the Pandas library
import pandas as pd
_____no_output_____
MIT
aulas/aula2.ipynb
artuguen28/Do_Zero_Ao_DS
Loading the dataset into the variable data
data = pd.read_csv('datasets\kc_house_data.csv')
_____no_output_____
MIT
aulas/aula2.ipynb
artuguen28/Do_Zero_Ao_DS
Selecting by column names
print(data[['id', 'date', 'price']])
_____no_output_____
MIT
aulas/aula2.ipynb
artuguen28/Do_Zero_Ao_DS
Selecting by indices
print(data.iloc[0:10, 1:4])
date price bedrooms 0 20141013T000000 221900.0 3 1 20141209T000000 538000.0 3 2 20150225T000000 180000.0 2 3 20141209T000000 604000.0 4 4 20150218T000000 510000.0 3 5 20140512T000000 1225000.0 4 6 20140627T000000 257500.0 3 7 20150115T000000 291850.0 3 8 20150415T000000 229500.0 3 9 20150312T000000 323000.0 3
MIT
aulas/aula2.ipynb
artuguen28/Do_Zero_Ao_DS
Answering the business questions: Date of the oldest property
data['date'] = pd.to_datetime(data['date']) data.sort_values('date', ascending=True)
_____no_output_____
MIT
aulas/aula2.ipynb
artuguen28/Do_Zero_Ao_DS
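The cell above sorts the whole frame by date; to answer the question directly, the oldest listing date can also be read off with `min()` or from the first row of the sorted frame (a small added illustration, not part of the original notebook):

```python
# Date of the oldest property in the dataset
print(data['date'].min())

# Or inspect the full record of the oldest listing
print(data.sort_values('date', ascending=True).iloc[0][['id', 'date', 'price']])
```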
Determine the highest number of floors and count how many houses we have with that number of floors
data['floors'].unique() print(data.loc[data['floors'] == 3.5].shape)
(8, 21)
MIT
aulas/aula2.ipynb
artuguen28/Do_Zero_Ao_DS
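The cell above only counts the houses with 3.5 floors (the maximum). To count houses for every distinct floor value in one call, `value_counts` is the idiomatic shortcut (an added illustration, not part of the original notebook):

```python
# Number of houses per distinct 'floors' value, ordered by floor count
print(data['floors'].value_counts().sort_index())
```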
Creating a price classification
data['level'] = 'standard' data.loc[data['price'] > 540000, 'level'] = 'high_level' data.loc[data['price'] < 540000, 'level'] = 'low_level' data.head()
_____no_output_____
MIT
aulas/aula2.ipynb
artuguen28/Do_Zero_Ao_DS
Report sorted by price
report = data[['id', 'date', 'price', 'bedrooms', 'sqft_lot', 'level']].sort_values('price', ascending=False) report.to_csv('datasets/report_aula02.csv', index=False)
_____no_output_____
MIT
aulas/aula2.ipynb
artuguen28/Do_Zero_Ao_DS
Sorting

1. Bubble: $O(n^2)$ (repeatedly swap adjacent elements if they are in the wrong order)
2. Selection: $O(n^2)$ (find the largest number and place it in its correct position)
3. Insertion: $O(n^2)$
4. Shell: $O(n^2)$
5. Merge: $O(n \log n)$ (a sketch is added at the end of this notebook)
6. Quick: $O(n \log n)$ (choosing a proper pivot is important)
7. Counting: $O(n)$
8. Radix: $O(n)$
9. Bucket: $O(n)$

---

Bubble
def bubble(arr):
    n = len(arr)
    for i in range(n):
        # (n-1)-i: the (i+1)-th index from the end
        # pass 0 -> cursor moves up to index n-1
        # pass 1 -> cursor moves up to index n-1-1
        for j in range(0, (n-1)-i):
            print(j)
            if arr[j] > arr[j+1]:
                arr[j], arr[j+1] = arr[j+1], arr[j]

# test code (an incomplete, duplicate draft of bubble() was removed here so the cell runs)
arr = [64, 34, 25, 12, 22, 11, 90]
bubble(arr)
arr

def bubble2(arr):
    # Early-exit version: stop when a full pass makes no swaps
    n = len(arr)
    for i in range(n):
        swapped = False
        for j in range(0, n-1-i):
            if arr[j] > arr[j+1]:
                arr[j], arr[j+1] = arr[j+1], arr[j]
                swapped = True  # there was still an unsorted part
        if swapped == False:
            break

def b(arr):
    n = len(arr)
    for i in range(n):
        swapped = False
        for j in range(0, n-1-i):
            if arr[j] > arr[j+1]:
                swapped = True
                arr[j], arr[j+1] = arr[j+1], arr[j]
        if swapped == False:
            return
_____no_output_____
MIT
01.Algorithm/algorithm.ipynb
HenryPaik1/Study
Selection Sorting
def Selection(arr):
    n = len(arr)
    for i in range(n-1, 0, -1):
        # find the position of the largest element in arr[0..i]
        positionOfMax = 0
        for loc in range(1, i+1):
            if arr[loc] > arr[positionOfMax]:
                positionOfMax = loc
        # swap it into its final place
        # (the original cell swapped with `loc`, which was a no-op; the recorded output below therefore shows the unsorted list)
        arr[i], arr[positionOfMax] = arr[positionOfMax], arr[i]

# test code
arr = [54, 26, 93, 17, 77, 31, 44, 55, 20]
Selection(arr)
print(arr)
[54, 26, 93, 17, 77, 31, 44, 55, 20]
MIT
01.Algorithm/algorithm.ipynb
HenryPaik1/Study
Quick
# partition: `cur` scans from `low` up to `high`, moving elements <= pivot to the front
def partition(arr, low, high):
    i = low - 1
    pivot = arr[high]
    for cur in range(low, high):
        print(cur, i)
        if arr[cur] <= pivot:
            i += 1
            arr[i], arr[cur] = arr[cur], arr[i]
    arr[i+1], arr[high] = arr[high], arr[i+1]
    return i+1

def QuickSort(arr, low, high):
    if low < high:
        pi = partition(arr, low, high)
        # first half
        QuickSort(arr, low, pi-1)
        # second half
        QuickSort(arr, pi+1, high)

# test code
arr = [10, 7, 8, 9, 1, 5]
n = len(arr)
QuickSort(arr, 0, n-1)
for i in range(n):
    print(arr[i])
0 -1 1 -1 2 -1 3 -1 4 -1 2 1 3 1 4 1 3 2 4 2 4 3 1 5 7 8 9 10
MIT
01.Algorithm/algorithm.ipynb
HenryPaik1/Study
Quick2
def partition(arr, start, end):
    pivot = arr[start]
    i = start + 1
    j = end - 1
    while True:
        # i: traverse from the beginning
        # j: traverse from the end
        # skip over arr[i] (left of pivot) while it is smaller than or equal to the pivot
        while (i <= j and arr[i] <= pivot):
            i += 1
        # skip over arr[j] (right of pivot) while it is larger than or equal to the pivot
        while (i <= j and arr[j] >= pivot):
            j -= 1
        if i <= j:
            arr[i], arr[j] = arr[j], arr[i]
            print(start)
        # when i and j cross, swap the pivot (at the front) with the rightmost value of the smaller-side partition
        else:
            arr[start], arr[j] = arr[j], arr[start]
            return j

def quicksort(arr, start, end):
    if end - start > 1:
        # p: pivot location
        p = partition(arr, start, end)
        quicksort(arr, start=start, end=p)
        quicksort(arr, start=p+1, end=end)
_____no_output_____
MIT
01.Algorithm/algorithm.ipynb
HenryPaik1/Study
Counting Sort

- reference: https://www.geeksforgeeks.org/radix-sort/
- count_arr: counts how many times each value 0, 1, 2, ..., n appears in arr
- iterate over 0, 1, ..., n
- fill the result with 0, 1, ..., n, repeating each value according to its count
# The key is building the counting array,
# then iterating as many times as each count to rebuild arr
def counting_sort(arr, max_val):
    count_arr = [0 for _ in range(max_val)]
    for num in arr:
        count_arr[num] += 1
    i = 0
    for num in range(max_val):
        iter_n = count_arr[num]
        for _ in range(iter_n):
            arr[i] = num
            i += 1
    return arr

# test code
arr = [5, 1, 5, 1, 1, 2, 4, 3, 4, 3, 2]
max_val = 6
counting_sort(arr, max_val)
_____no_output_____
MIT
01.Algorithm/algorithm.ipynb
HenryPaik1/Study
Radix Sort

Key idea:
- `number // 10^(digit-1) % 10` extracts the desired digit (1st digit: 1, 2nd digit: 10, ...)
- `// 10^(digit-1)`: makes the digit you want the last digit; e.g. to turn the 3rd digit from the end of 25948 (which is 9) into the last digit, compute 25948 // 10^(3-1) = 259
- `% 10`: keeps only that last digit
import numpy as np

4378 // 10**(4-1) % 10  # digit-extraction example: the 4th digit (from the right) of 4378

def SortingByDigit(arr, exp):
    n = len(arr)
    output = [0 for _ in range(n)]
    count = [0 for _ in range(10)]
    for num in arr:
        last_digit = num // exp % 10
        count[last_digit] += 1
    # cumulative counts over the 10 possible digits
    # (the original cell compared against an undefined `max_` here)
    i = 1
    while i < 10:
        count[i] += count[i-1]
        i += 1
    print('digit:', np.log10(exp)+1)
    print(count)
    # Why iterate backwards? It keeps the sort stable, which is required
    # when we later arrange by the higher-order digits.
    i = n-1
    while i >= 0:
        last_digit = (arr[i] // exp) % 10
        idx_by_cum = count[last_digit]
        output[idx_by_cum - 1] = arr[i]
        count[last_digit] -= 1
        i -= 1
    print(count)
    # update arr
    for i in range(0, len(arr)):
        arr[i] = output[i]
    # arr = [i for i in output]
    print(arr)
    print()

def radixSort(arr):
    max_ = max(arr)
    exp = 1
    while (max_ // exp) > 0:
        print(max_, exp)
        SortingByDigit(arr, exp)
        exp *= 10

# test code
arr = [170, 5145, 3145, 2145, 802, 24]
radixSort(arr)
5145 1 digit: 1.0 [1, 1, 2, 2, 3, 6, 6, 6, 6, 6] [0, 1, 1, 2, 2, 3, 6, 6, 6, 6] [170, 802, 24, 5145, 3145, 2145] 5145 10 digit: 2.0 [1, 1, 2, 2, 5, 5, 5, 6, 6, 6] [0, 1, 1, 2, 2, 5, 5, 5, 6, 6] [802, 24, 5145, 3145, 2145, 170] 5145 100 digit: 3.0 [1, 5, 5, 5, 5, 5, 5, 5, 6, 6] [0, 1, 5, 5, 5, 5, 5, 5, 5, 6] [24, 5145, 3145, 2145, 170, 802] 5145 1000 digit: 4.0 [3, 3, 4, 5, 5, 6, 6, 6, 6, 6] [0, 3, 3, 4, 5, 5, 6, 6, 6, 6] [24, 170, 802, 2145, 3145, 5145]
MIT
01.Algorithm/algorithm.ipynb
HenryPaik1/Study
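The overview at the top of this notebook lists merge sort among the $O(n \log n)$ algorithms, but it is not implemented in the cells above. As a reference, here is a minimal top-down merge sort sketch (added for completeness, not part of the original notebook):

```python
def merge_sort(arr):
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    left = merge_sort(arr[:mid])
    right = merge_sort(arr[mid:])
    # merge the two sorted halves
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged

# test code
print(merge_sort([170, 45, 75, 90, 802, 24, 2, 66]))
```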
Airtable - Get data

**Tags:** airtable database productivity spreadsheet naas_drivers operations snippet dataframe

**Author:** [Jeremy Ravenel](https://www.linkedin.com/in/ACoAAAJHE7sB5OxuKHuzguZ9L6lfDHqw--cdnJg/)

Input

Import library
from naas_drivers import airtable
_____no_output_____
BSD-3-Clause
Airtable/Airtable_Get_data.ipynb
techthiyanes/awesome-notebooks
Variables
API_KEY = 'API_KEY' BASE_KEY = 'BASE_KEY' TABLE_NAME = 'TABLE_NAME'
_____no_output_____
BSD-3-Clause
Airtable/Airtable_Get_data.ipynb
techthiyanes/awesome-notebooks
Model

Connect to airtable and get data
# Use the `airtable` driver imported above (the original cell referenced `naas_drivers.airtable`, which was not imported under that name)
df = airtable.connect(API_KEY, BASE_KEY, TABLE_NAME).get(view='All opportunities', maxRecords=20)
_____no_output_____
BSD-3-Clause
Airtable/Airtable_Get_data.ipynb
techthiyanes/awesome-notebooks
Output

Display result
df
_____no_output_____
BSD-3-Clause
Airtable/Airtable_Get_data.ipynb
techthiyanes/awesome-notebooks
Physically labeled data: pyfocs single-ended examples

Finally, after all of that (probably confusing) work, we can map the data to physical coordinates.
import xarray as xr import pyfocs import os
/Users/karllapo/anaconda3/lib/python3.7/typing.py:847: FutureWarning: xarray subclass DataStore should explicitly define __slots__ super().__init_subclass__(*args, **kwargs)
MIT
notebooks/pyfocs_ex3_finalcheck.ipynb
klapo/btmm_process
1. Load data

1.1 Configuration files

As in the previous example we will load and prepare the configuration files. This time we will load all of the configuration files.

Physically labeled data is triggered by setting the below flag within the configuration file:

```python
final_flag = True
```
dir_example = os.path.join('../tests/data/') # Grab a configuration file for the twisted pair pvc fiber and for the stainless steel fiber config_names = [ 'example_configuration_steelfiber.yml', 'example_twistedpair_bothwls.yml', 'example_twistedpair_p1wls.yml', 'example_twistedpair_p2wls.yml', ] cfg_fname = os.path.join(dir_example, config_names[0]) cfg_ss, lib_ss = pyfocs.check.config(cfg_fname, ignore_flags=True) cfg_fname = os.path.join(dir_example, config_names[1]) cfg_both, lib_both = pyfocs.check.config(cfg_fname, ignore_flags=True) cfg_fname = os.path.join(dir_example, config_names[2]) cfg_p1, lib_p1 = pyfocs.check.config(cfg_fname, ignore_flags=True) cfg_fname = os.path.join(dir_example, config_names[3]) cfg_p2, lib_p2 = pyfocs.check.config(cfg_fname, ignore_flags=True)
_____no_output_____
MIT
notebooks/pyfocs_ex3_finalcheck.ipynb
klapo/btmm_process
1.2 Data

- In this case we only use a single twisted pair, p1, since it is closer to the DTS device in LAF space, yielding a less noisy signal.
- Additionally, we will load the paired heated-unheated stainless steel fiber that has been interpolated to a common spatial index.
ds_p1 = xr.open_dataset(os.path.join(dir_example, 'multifiledemo', 'final', 'multifiledemo_final_20190722-0000_p1-wls_unheated.nc')) ds_p2 = xr.open_dataset(os.path.join(dir_example, 'multifiledemo', 'final', 'multifiledemo_final_20190722-0000_p2-wls_unheated.nc')) ds_cold = xr.open_dataset(os.path.join(dir_example, 'multifiledemo', 'final', 'multifiledemo_final_20190722-0000_ss-wls_unheated.nc')) ds_heat = xr.open_dataset(os.path.join(dir_example, 'multifiledemo', 'final', 'multifiledemo_final_20190722-0000_ss-wls_heated.nc')) print('=================') print('Unheated fibers - Twisted PVC fiber, pair 1') print(ds_p1) print('') print('=================') print('Unheated fibers - Twisted PVC fiber, pair 2') print(ds_p2) print('') print('=================') print('Unheated fibers - stainless steel') print(ds_cold) print('') print('=================') print('Heated fibers - stainless steel') print(ds_heat) print('')
================= Unheated fibers - Twisted PVC fiber, pair 1 <xarray.Dataset> Dimensions: (time: 60, xyz: 1612) Coordinates: * time (time) datetime64[ns] 2019-07-22T00:00:05 ... 2019-07-22T00:05:00 LAF (xyz) float64 ... unheated (xyz) object ... x (xyz) float64 ... y (xyz) float64 ... z (xyz) float64 ... Dimensions without coordinates: xyz Data variables: cal_temp (time, xyz) float64 ... Attributes: dt: 5s dLAF: 0.254 unheated: IR_NE1_p1;IR_NE1_p2;IR_NE2_p1;IR_NE2_p2;IR_NW_p1;IR_NW_p2;IR_S... ================= Unheated fibers - Twisted PVC fiber, pair 2 <xarray.Dataset> Dimensions: (time: 60, xyz: 1612) Coordinates: * time (time) datetime64[ns] 2019-07-22T00:00:05 ... 2019-07-22T00:05:00 LAF (xyz) float64 ... unheated (xyz) object ... x (xyz) float64 ... y (xyz) float64 ... z (xyz) float64 ... Dimensions without coordinates: xyz Data variables: cal_temp (time, xyz) float64 ... Attributes: dt: 5s dLAF: 0.254 unheated: IR_NE1_p1;IR_NE1_p2;IR_NE2_p1;IR_NE2_p2;IR_NW_p1;IR_NW_p2;IR_S... ================= Unheated fibers - stainless steel <xarray.Dataset> Dimensions: (time: 60, xyz: 2377) Coordinates: * time (time) datetime64[ns] 2019-07-22T00:00:05 ... 2019-07-22T00:05:00 LAF (xyz) float64 ... unheated (xyz) object ... x (xyz) float64 ... y (xyz) float64 ... z (xyz) float64 ... Dimensions without coordinates: xyz Data variables: cal_temp (time, xyz) float64 ... Attributes: dt: 5s dLAF: 0.254 ================= Heated fibers - stainless steel <xarray.Dataset> Dimensions: (time: 60, xyz: 2377) Coordinates: * time (time) datetime64[ns] 2019-07-22T00:00:05 ... 2019-07-22T00:05:00 LAF (xyz) float64 ... heated (xyz) object ... x (xyz) float64 ... y (xyz) float64 ... z (xyz) float64 ... Dimensions without coordinates: xyz Data variables: cal_temp (time, xyz) float64 ... Attributes: dt: 5s dLAF: 0.254
MIT
notebooks/pyfocs_ex3_finalcheck.ipynb
klapo/btmm_process
Here we see that all datasets now have `x`, `y`, and `z` coordinates, which are labeled using the `xyz` multiindex. Other quantities have been dropped.

The netcdf files are also now labeled differently. Channel information has been excluded and there is now a label for the location type at the end of the file name.

2. Calculate wind speed

2.1 Construct the power variable

Here I will construct a data variable of power. The details of what is happening here are not important, other than that `power` is a data variable with dimensions of LAF. The wind speed code can accept `power` as a DataArray with dimensions shared with `cal_temp`, or as a single float.
import numpy as np power_loc = { '1': [1892.5, 2063.5], '2': [2063.5, 2205.5], '3': [2207.0, 2361.], '4': [2361., 2524.]} power_vals = { '1': 6.1, '2': 6.4, '3': 4.7, '4': 5.4,} ds_heat['power'] = ('LAF', np.zeros_like(ds_heat.LAF)) for p in power_vals: laf_mask = ((ds_heat.LAF > power_loc[p][0]) & (ds_heat.LAF < power_loc[p][1])) ds_heat['power'] = xr.where(laf_mask, np.ones_like(ds_heat.LAF.values) * power_vals[p], ds_heat.power.values)
_____no_output_____
MIT
notebooks/pyfocs_ex3_finalcheck.ipynb
klapo/btmm_process
2.2 Calculate wind speed
wind_speed = pyfocs.wind_speed.calculate(ds_heat.cal_temp, ds_cold.cal_temp, ds_heat.power)
Converted air temperature from Celsius to Kelvin. Converted air temperature from Celsius to Kelvin. Converted air temperature from Celsius to Kelvin. Converted air temperature from Celsius to Kelvin.
MIT
notebooks/pyfocs_ex3_finalcheck.ipynb
klapo/btmm_process
2.3 Split up wind speed based on fiber orientation

Wind speed is most efficiently measured in the direction orthogonal to the fiber. Since we have fibers that are orthogonal to each other, we effectively measured wind in two different directions. We represent that here by combining sections that are parallel to each other.
cross_valley_components = ['OR_SE', 'OR_NW'] logic = [wind_speed.unheated == l for l in cross_valley_components] logic = xr.concat(logic, dim='locations').any(dim='locations') wind_speed_cross_valley = wind_speed.where(logic, drop=True) along_valley_components = ['OR_SW2', 'OR_SW1', 'OR_NE1', 'OR_NE2'] logic = [wind_speed.unheated == l for l in along_valley_components] logic = xr.concat(logic, dim='locations').any(dim='locations') wind_speed_along_valley = wind_speed.where(logic, drop=True)
_____no_output_____
MIT
notebooks/pyfocs_ex3_finalcheck.ipynb
klapo/btmm_process
2.4 Create a Dataset that contains all unheated data
unheated = xr.concat([ds_cold, ds_p1], dim='xyz', coords='different')
_____no_output_____
MIT
notebooks/pyfocs_ex3_finalcheck.ipynb
klapo/btmm_process
3. Plot your Fiber Optic Distributed Sensing data

3.1 Wind speed and temperature
import matplotlib.pyplot as plt fig = plt.figure(figsize=(12, 6),) spec = fig.add_gridspec(ncols=4, nrows=2, width_ratios=[1, 0.08, 0.04, 0.08], hspace=0.18, wspace=0.25, ) ax_ew_cbar = fig.add_subplot(spec[0, 3]) ax_ns_cbar = fig.add_subplot(spec[1, 3]) ax_t_cbar = fig.add_subplot(spec[:, 1]) ax_temp = fig.add_subplot(spec[:, 0]) im = ax_temp.scatter(unheated.x, unheated.y, s=10, c=unheated.mean(dim='time').cal_temp.values, cmap='viridis', vmin=8.5, vmax=10) ax_temp.set_ylabel('Relative Northing (m)') ax_temp.set_xlabel('Relative Easting (m)') plt.colorbar(im, cax=ax_t_cbar, extend='both') ax_t_cbar.set_ylabel('Temperature (C)') ax_temp.set_title('a) LOVE19 Outer Array', loc='left') im = ax_temp.scatter(wind_speed_along_valley.x * 1.1, wind_speed_along_valley.y * 1.1, s=10, c=wind_speed_along_valley.mean(dim='time').values, cmap='Oranges', vmin=0.5, vmax=4) plt.colorbar(im, cax=ax_ew_cbar, extend='max') ax_ew_cbar.set_ylabel('Along valley wind (m/s)') im = ax_temp.scatter(wind_speed_cross_valley.x * 1.1, wind_speed_cross_valley.y * 1.1, s=10, c=wind_speed_cross_valley.mean(dim='time').values, cmap='Blues', vmin=0.5, vmax=4) plt.colorbar(im, cax=ax_ns_cbar, extend='max') ax_ns_cbar.set_ylabel('Cross valley wind (m/s)')
_____no_output_____
MIT
notebooks/pyfocs_ex3_finalcheck.ipynb
klapo/btmm_process
3.2 Biases in space
ds_p2 = ds_p2.interp_like(ds_p1) fig = plt.figure(figsize=(8, 6),) spec = fig.add_gridspec(ncols=2, nrows=1, width_ratios=[1, 0.1], hspace=0.18, wspace=0.25, ) ax_t_cbar = fig.add_subplot(spec[:, 1]) ax_temp = fig.add_subplot(spec[:, 0]) im = ax_temp.scatter( ds_p1.x, ds_p1.y, s=10, c=(ds_p1.cal_temp - ds_p2.cal_temp).mean(dim='time').values, cmap='RdBu', vmin=-0.5, vmax=0.5) ax_temp.set_ylabel('Relative Northing (m)') ax_temp.set_xlabel('Relative Easting (m)') plt.colorbar(im, cax=ax_t_cbar, extend='both') ax_t_cbar.set_ylabel('p1 - p2 (K)') ax_temp.set_title('LOVE19 Twisted PVC Fiber Bias', loc='left')
_____no_output_____
MIT
notebooks/pyfocs_ex3_finalcheck.ipynb
klapo/btmm_process
Machine Translation English-German Example Using SageMaker Seq2Seq

1. [Introduction](#Introduction)
2. [Setup](#Setup)
3. [Download dataset and preprocess](#Download-dataset-and-preprocess)
4. [Training the Machine Translation model](#Training-the-Machine-Translation-model)
5. [Inference](#Inference)

Introduction

Welcome to our Machine Translation end-to-end example! In this demo, we will train an English-German translation model and will test the predictions on a few examples.

The SageMaker Seq2Seq algorithm is built on top of [Sockeye](https://github.com/awslabs/sockeye), a sequence-to-sequence framework for Neural Machine Translation based on MXNet. SageMaker Seq2Seq implements state-of-the-art encoder-decoder architectures which can also be used for tasks like Abstractive Summarization in addition to Machine Translation.

To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on.

Setup

Let's start by specifying:
- The S3 bucket and prefix that you want to use for training and model data. **This should be within the same region as the Notebook Instance, training, and hosting.**
- The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the boto regexp in the cell below with the appropriate full IAM role arn string(s).
# S3 bucket and prefix bucket = '<your_s3_bucket_name_here>' prefix = 'sagemaker/<your_s3_prefix_here>' # E.g.'sagemaker/seq2seq/eng-german' import boto3 import re from sagemaker import get_execution_role role = get_execution_role()
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
Next, we'll import the Python libraries we'll need for the remainder of the exercise.
from time import gmtime, strftime import time import numpy as np import os import json # For plotting attention matrix later on import matplotlib %matplotlib inline import matplotlib.pyplot as plt
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
Download dataset and preprocess

In this notebook, we will train an English to German translation model on a dataset from the [Conference on Machine Translation (WMT) 2017](http://www.statmt.org/wmt17/).
%%bash wget http://data.statmt.org/wmt17/translation-task/preprocessed/de-en/corpus.tc.de.gz & \ wget http://data.statmt.org/wmt17/translation-task/preprocessed/de-en/corpus.tc.en.gz & wait gunzip corpus.tc.de.gz & \ gunzip corpus.tc.en.gz & wait mkdir validation curl http://data.statmt.org/wmt17/translation-task/preprocessed/de-en/dev.tgz | tar xvzf - -C validation
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
Please note that it is a common practice to split words into subwords using Byte Pair Encoding (BPE). Please refer to [this](https://github.com/awslabs/sockeye/tree/master/tutorials/wmt) tutorial if you are interested in performing BPE; a minimal BPE sketch also follows the next cell. Since training on the whole dataset might take several hours/days, for this demo, let us train on the **first 10,000 lines only**. Don't run the next cell if you want to train on the complete dataset.
!head -n 10000 corpus.tc.en > corpus.tc.en.small !head -n 10000 corpus.tc.de > corpus.tc.de.small
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
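The notebook itself skips BPE, but if you want to try it, the sketch below shows one common way to learn and apply subword segmentation. This cell is not part of the original notebook: it assumes the `subword-nmt` package, and the merge count (10000) and output file names are illustrative; the BPE-encoded files would still need to go through `create_vocab_proto.py` as shown below.

# Optional BPE sketch (not part of the original notebook).
# Assumes the subword-nmt package; merge count and file names are illustrative.
!pip install subword-nmt

# Learn a BPE model on each side of the (small) parallel corpus
!subword-nmt learn-bpe -s 10000 < corpus.tc.en.small > bpe.codes.en
!subword-nmt learn-bpe -s 10000 < corpus.tc.de.small > bpe.codes.de

# Apply the learned merges, producing subword-segmented training files
!subword-nmt apply-bpe -c bpe.codes.en < corpus.tc.en.small > corpus.tc.en.small.bpe
!subword-nmt apply-bpe -c bpe.codes.de < corpus.tc.de.small > corpus.tc.de.small.bpe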
Now, let's use the preprocessing script `create_vocab_proto.py` (provided with this notebook) to create vocabulary mappings (strings to integers) and convert these files to x-recordio-protobuf as required for training by SageMaker Seq2Seq. Uncomment the cell below and run it to check the arguments this script expects.
%%bash # python3 create_vocab_proto.py -h
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
The cell below does the preprocessing. If you are using the complete dataset, the script might take around 10-15 min on an m4.xlarge notebook instance. Remove ".small" from the file names for training on full datasets.
%%time %%bash python3 create_vocab_proto.py \ --train-source corpus.tc.en.small \ --train-target corpus.tc.de.small \ --val-source validation/newstest2014.tc.en \ --val-target validation/newstest2014.tc.de
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
The script will output 4 files, namely:

- train.rec : Contains source and target sentences for training in protobuf format
- val.rec : Contains source and target sentences for validation in protobuf format
- vocab.src.json : Vocabulary mapping (string to int) for source language (English in this example)
- vocab.trg.json : Vocabulary mapping (string to int) for target language (German in this example)

Let's upload the pre-processed dataset and vocabularies to S3. (A quick way to inspect one of the vocabulary files follows the cell below.)
def upload_to_s3(bucket, prefix, channel, file): s3 = boto3.resource('s3') data = open(file, "rb") key = prefix + "/" + channel + '/' + file s3.Bucket(bucket).put_object(Key=key, Body=data) upload_to_s3(bucket, prefix, 'train', 'train.rec') upload_to_s3(bucket, prefix, 'validation', 'val.rec') upload_to_s3(bucket, prefix, 'vocab', 'vocab.src.json') upload_to_s3(bucket, prefix, 'vocab', 'vocab.trg.json') region_name = boto3.Session().region_name containers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/seq2seq:latest', 'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/seq2seq:latest', 'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/seq2seq:latest', 'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/seq2seq:latest'} container = containers[region_name] print('Using SageMaker Seq2Seq container: {} ({})'.format(container, region_name))
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
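As a quick sanity check — an optional addition, not part of the original notebook — you can peek at one of the generated vocabulary files to see the string-to-integer mapping; the file name matches the output listed above.

# Optional sanity check (not in the original notebook): inspect the source vocabulary mapping
import json

with open("vocab.src.json") as f:
    vocab_src = json.load(f)

print("Source vocabulary size:", len(vocab_src))
# Show a handful of (token, id) pairs
print(list(vocab_src.items())[:5])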
Training the Machine Translation model
job_name = 'seq2seq-en-de-p2-xlarge-' + strftime("%Y-%m-%d-%H", gmtime())
print("Training job", job_name)

create_training_params = \
{
    "AlgorithmSpecification": {
        "TrainingImage": container,
        "TrainingInputMode": "File"
    },
    "RoleArn": role,
    "OutputDataConfig": {
        "S3OutputPath": "s3://{}/{}/".format(bucket, prefix)
    },
    "ResourceConfig": {
        # Seq2Seq does not support multiple machines. Currently, it only supports single machine, multiple GPUs
        "InstanceCount": 1,
        "InstanceType": "ml.p2.xlarge",  # We suggest one of ["ml.p2.16xlarge", "ml.p2.8xlarge", "ml.p2.xlarge"]
        "VolumeSizeInGB": 50
    },
    "TrainingJobName": job_name,
    "HyperParameters": {
        # Please refer to the documentation for complete list of parameters
        "max_seq_len_source": "60",
        "max_seq_len_target": "60",
        "optimized_metric": "bleu",
        "batch_size": "64",  # Please use a larger batch size (256 or 512) if using ml.p2.8xlarge or ml.p2.16xlarge
        "checkpoint_frequency_num_batches": "1000",
        "rnn_num_hidden": "512",
        "num_layers_encoder": "1",
        "num_layers_decoder": "1",
        "num_embed_source": "512",
        "num_embed_target": "512",
        "checkpoint_threshold": "3",
        "max_num_batches": "2100"
        # Training will stop after 2100 iterations/batches.
        # This is just for demo purposes. Remove the above parameter if you want a better model.
    },
    "StoppingCondition": {
        "MaxRuntimeInSeconds": 48 * 3600
    },
    "InputDataConfig": [
        {
            "ChannelName": "train",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "S3Prefix",
                    "S3Uri": "s3://{}/{}/train/".format(bucket, prefix),
                    "S3DataDistributionType": "FullyReplicated"
                }
            },
        },
        {
            "ChannelName": "vocab",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "S3Prefix",
                    "S3Uri": "s3://{}/{}/vocab/".format(bucket, prefix),
                    "S3DataDistributionType": "FullyReplicated"
                }
            },
        },
        {
            "ChannelName": "validation",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "S3Prefix",
                    "S3Uri": "s3://{}/{}/validation/".format(bucket, prefix),
                    "S3DataDistributionType": "FullyReplicated"
                }
            },
        }
    ]
}

sagemaker_client = boto3.Session().client(service_name='sagemaker')

sagemaker_client.create_training_job(**create_training_params)

status = sagemaker_client.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print(status)

status = sagemaker_client.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print(status)

# if the job failed, determine why
if status == 'Failed':
    # use the sagemaker_client created above (`sage` is not defined until later in the notebook)
    message = sagemaker_client.describe_training_job(TrainingJobName=job_name)['FailureReason']
    print('Training failed with the following error: {}'.format(message))
    raise Exception('Training job failed')
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
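If you would rather block the notebook until training finishes instead of polling the status manually, boto3 provides a waiter for training jobs. The sketch below is an optional addition, not part of the original notebook, and assumes the `training_job_completed_or_stopped` waiter available on the SageMaker client.

# Optional (not in the original notebook): block until the training job completes or stops,
# then print its final status.
sagemaker_client.get_waiter('training_job_completed_or_stopped').wait(TrainingJobName=job_name)
status = sagemaker_client.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print("Final training job status:", status)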
> Now wait for the training job to complete and proceed to the next step after you see model artifacts in your S3 bucket. You can jump to [Use a pretrained model](#Use-a-pretrained-model) as training might take some time.

Inference

A trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means translating sentence(s) from English to German.

This section involves several steps:

- Create model - Create a model using the artifact (model.tar.gz) produced by training
- Create Endpoint Configuration - Create a configuration defining an endpoint, using the above model
- Create Endpoint - Use the configuration to create an inference endpoint.
- Perform Inference - Perform inference on some input data using the endpoint.

Create model

We now create a SageMaker Model from the training output. Using the model, we can then create an Endpoint Configuration.
use_pretrained_model = False
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
Use a pretrained model

Please uncomment and run the cell below if you want to use a pretrained model, as training might take several hours/days to complete.
# use_pretrained_model = True # model_name = "pretrained-en-de-model" # !curl https://s3-us-west-2.amazonaws.com/gsaur-seq2seq-data/seq2seq/eng-german/full-nb-translation-eng-german-p2-16x-2017-11-24-22-25-53/output/model.tar.gz > model.tar.gz # !curl https://s3-us-west-2.amazonaws.com/gsaur-seq2seq-data/seq2seq/eng-german/full-nb-translation-eng-german-p2-16x-2017-11-24-22-25-53/output/vocab.src.json > vocab.src.json # !curl https://s3-us-west-2.amazonaws.com/gsaur-seq2seq-data/seq2seq/eng-german/full-nb-translation-eng-german-p2-16x-2017-11-24-22-25-53/output/vocab.trg.json > vocab.trg.json # upload_to_s3(bucket, prefix, 'pretrained_model', 'model.tar.gz') # model_data = "s3://{}/{}/pretrained_model/model.tar.gz".format(bucket, prefix) %%time sage = boto3.client('sagemaker') if not use_pretrained_model: info = sage.describe_training_job(TrainingJobName=job_name) model_name=job_name model_data = info['ModelArtifacts']['S3ModelArtifacts'] print(model_name) print(model_data) primary_container = { 'Image': container, 'ModelDataUrl': model_data } create_model_response = sage.create_model( ModelName = model_name, ExecutionRoleArn = role, PrimaryContainer = primary_container) print(create_model_response['ModelArn'])
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
Create endpoint configuration

Use the model to create an endpoint configuration. The endpoint configuration also contains information about the type and number of EC2 instances to use when hosting the model.

Since SageMaker Seq2Seq is based on Neural Nets, we could use an ml.p2.xlarge (GPU) instance, but for this example we will use a free-tier-eligible ml.m4.xlarge.
from time import gmtime, strftime endpoint_config_name = 'Seq2SeqEndpointConfig-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) print(endpoint_config_name) create_endpoint_config_response = sage.create_endpoint_config( EndpointConfigName = endpoint_config_name, ProductionVariants=[{ 'InstanceType':'ml.m4.xlarge', 'InitialInstanceCount':1, 'ModelName':model_name, 'VariantName':'AllTraffic'}]) print("Endpoint Config Arn: " + create_endpoint_config_response['EndpointConfigArn'])
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
Create endpoint

Lastly, we create the endpoint that serves up the model, by specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 10-15 minutes to complete.
%%time import time endpoint_name = 'Seq2SeqEndpoint-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) print(endpoint_name) create_endpoint_response = sage.create_endpoint( EndpointName=endpoint_name, EndpointConfigName=endpoint_config_name) print(create_endpoint_response['EndpointArn']) resp = sage.describe_endpoint(EndpointName=endpoint_name) status = resp['EndpointStatus'] print("Status: " + status) # wait until the status has changed sage.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name) # print the status of the endpoint endpoint_response = sage.describe_endpoint(EndpointName=endpoint_name) status = endpoint_response['EndpointStatus'] print('Endpoint creation ended with EndpointStatus = {}'.format(status)) if status != 'InService': raise Exception('Endpoint creation failed.')
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
If you see the message,

> Endpoint creation ended with EndpointStatus = InService

then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.

We will finally create a runtime object from which we can invoke the endpoint.
runtime = boto3.client(service_name='runtime.sagemaker')
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
Perform Inference

Using JSON format for inference (Suggested for a single or small number of data instances)

Note that you don't have to convert strings to integers using the vocabulary mapping for inference in JSON mode.
sentences = ["you are so good !", "can you drive a car ?", "i want to watch a movie ." ] payload = {"instances" : []} for sent in sentences: payload["instances"].append({"data" : sent}) response = runtime.invoke_endpoint(EndpointName=endpoint_name, ContentType='application/json', Body=json.dumps(payload)) response = response["Body"].read().decode("utf-8") response = json.loads(response) print(response)
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
Retrieving the Attention Matrix

Passing `"attention_matrix":"true"` in the `configuration` of the data instance will return the attention matrix.
sentence = 'can you drive a car ?' payload = {"instances" : [{ "data" : sentence, "configuration" : {"attention_matrix":"true"} } ]} response = runtime.invoke_endpoint(EndpointName=endpoint_name, ContentType='application/json', Body=json.dumps(payload)) response = response["Body"].read().decode("utf-8") response = json.loads(response)['predictions'][0] source = sentence target = response["target"] attention_matrix = np.array(response["matrix"]) print("Source: %s \nTarget: %s" % (source, target)) # Define a function for plotting the attentioan matrix def plot_matrix(attention_matrix, target, source): source_tokens = source.split() target_tokens = target.split() assert attention_matrix.shape[0] == len(target_tokens) plt.imshow(attention_matrix.transpose(), interpolation="nearest", cmap="Greys") plt.xlabel("target") plt.ylabel("source") plt.gca().set_xticks([i for i in range(0, len(target_tokens))]) plt.gca().set_yticks([i for i in range(0, len(source_tokens))]) plt.gca().set_xticklabels(target_tokens) plt.gca().set_yticklabels(source_tokens) plt.tight_layout() plot_matrix(attention_matrix, target, source)
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
Using Protobuf format for inference (Suggested for efficient bulk inference)

Read in the vocabulary mappings, as this mode of inference accepts lists of integers and returns lists of integers.
import io import tempfile from record_pb2 import Record from create_vocab_proto import vocab_from_json, reverse_vocab, write_recordio, list_to_record_bytes, read_next source = vocab_from_json("vocab.src.json") target = vocab_from_json("vocab.trg.json") source_rev = reverse_vocab(source) target_rev = reverse_vocab(target) sentences = ["this is so cool", "i am having dinner .", "i am sitting in an aeroplane .", "come let us go for a long drive ."]
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
Converting the strings to integers, followed by protobuf encoding:
# Convert strings to integers using source vocab mapping. Out-of-vocabulary strings are mapped to 1 - the mapping for <unk> sentences = [[source.get(token, 1) for token in sentence.split()] for sentence in sentences] f = io.BytesIO() for sentence in sentences: record = list_to_record_bytes(sentence, []) write_recordio(f, record) response = runtime.invoke_endpoint(EndpointName=endpoint_name, ContentType='application/x-recordio-protobuf', Body=f.getvalue()) response = response["Body"].read()
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
Now, parse the protobuf response and convert the lists of integers back to strings
def _parse_proto_response(received_bytes): output_file = tempfile.NamedTemporaryFile() output_file.write(received_bytes) output_file.flush() target_sentences = [] with open(output_file.name, 'rb') as datum: next_record = True while next_record: next_record = read_next(datum) if next_record: rec = Record() rec.ParseFromString(next_record) target = list(rec.features["target"].int32_tensor.values) target_sentences.append(target) else: break return target_sentences targets = _parse_proto_response(response) resp = [" ".join([target_rev.get(token, "<unk>") for token in sentence]) for sentence in targets] print(resp)
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
Stop / Close the Endpoint (Optional)

Finally, we should delete the endpoint before we close the notebook. (A short sketch for also removing the endpoint configuration and model follows the cell below.)
sage.delete_endpoint(EndpointName=endpoint_name)
_____no_output_____
Apache-2.0
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
karim7262/amazon-sagemaker-examples
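Deleting the endpoint stops billing for the hosting instance, but the endpoint configuration and the model object still exist in your account. For a full cleanup — an optional addition, not part of the original notebook — they can be removed with the same client:

# Optional full cleanup (not in the original notebook): remove the endpoint configuration and the model as well
sage.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
sage.delete_model(ModelName=model_name)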
from google.colab import drive drive.mount('/content/drive') import numpy as np import csv
_____no_output_____
MIT
B_DS2.ipynb
eunyul24/eunyul24.github.io
header = [] userId = [] movieId = [] ratings = [] test = [] rownum = -1 with open('/content/drive/My Drive/Colab Notebooks/ml-20m/ratings.csv','r') as f: data = csv.reader(f) for row in data: rownum += 1 if rownum == 0: header = row continue if int(row[3]) < 1388502017: userId.append(int(row[0])) movieId.append(int(row[1])) ratings.append(float(row[2])) else: test.append([int(row[0]), int(row[1]), float(row[2]), int(row[3])]) print(len(userId)) print(len(test))
19152913 847350
MIT
B_DS2.ipynb
eunyul24/eunyul24.github.io
userIdx = dict() for i, uid in enumerate(np.unique(userId)): userIdx[uid] = i movieIdx = dict() for i, mid in enumerate(np.unique(movieId)): movieIdx[mid] = i X = np.zeros((len(ratings),2), dtype=int) for i in range(len(userId)): X[i] = [userIdx[userId[i]], movieIdx[movieId[i]]]
_____no_output_____
MIT
B_DS2.ipynb
eunyul24/eunyul24.github.io
class MatrixFactorization(): def __init__(self, ratings, X, k = 10, learning_rate = 0.01, reg_param = 0.1, epochs = 20): """ param R: ratings param X: userId, movieId param k: latent parameter param learning_rate: alpha on weight update param reg_param: beta on weight update param epochs: training epochs """ self.ratings = ratings self.X = X self.num_users = len(np.unique(X[:, 0])) self.num_movies = len(np.unique(X[:, 1])) self.k = k self.learning_rate = learning_rate self.reg_param = reg_param self.epochs = epochs def fit(self): """ training Matrix Factorization : Update matrix latent weight and bias return: training_process """ # init latent features self.P = np.random.normal(size=(self.num_users, self.k)) self.Q = np.random.normal(size=(self.num_movies, self.k)) # init biases self.b = np.mean(self.ratings) self.b_P = np.zeros(self.num_users) self.b_Q = np.zeros(self.num_movies) # train while epochs self.training_process = [] for epoch in range(self.epochs): for i,rating in enumerate(self.ratings): self.gradient_descent(self.X[i, 0], self.X[i, 1], rating) rmse = self.rmse() self.training_process.append((epoch,rmse)) # print status if (epoch + 1) % 10 == 0: print("Iteration: %d ; RMSE = %.4f" % (epoch + 1, rmse)) return self.training_process def rmse(self): """ compute root mean square error return: rmse cost """ error = 0 for i,rating in enumerate(ratings): error += pow(rating - self.get_prediction(self.X[i, 0], self.X[i, 1]), 2) return np.sqrt(error) def gradient_descent(self, i, j, rating): """ graident descent function param i: user index of matrix param j: item index of matrix param rating: rating of (i,j) """ # get error prediction = self.get_prediction(i, j) error = rating - prediction # update biases self.b_P[i] += self.learning_rate * (error - self.reg_param * self.b_P[i]) self.b_Q[j] += self.learning_rate * (error - self.reg_param * self.b_Q[j]) # update latent feature self.P[i, :] += self.learning_rate * (error * self.Q[j, :] - self.reg_param * self.P[i, :]) self.Q[j, :] += self.learning_rate * (error * self.P[i, :] - self.reg_param * self.Q[j, :]) def get_prediction(self, i, j): """ get predicted rating: user_i, item_j return: prediction of r_ij """ return self.b + self.b_P[i] + self.b_Q[j] + self.P[i, :].dot(self.Q[j, :].T)
_____no_output_____
MIT
B_DS2.ipynb
eunyul24/eunyul24.github.io
MF = MatrixFactorization(ratings, X) training_process = MF.fit() print("train RMSE:", MF.rmse()) f = open('/content/drive/My Drive/Colab Notebooks/ml-20m/B_results_DS2.csv', 'w', encoding='utf-8') header[2] = 'predected rating' wr = csv.writer(f) wr.writerow(header) error = 0 for uId, mId, rating, time in test: if uId in userIdx.keys() and mId in movieIdx.keys(): predicted = MF.get_prediction(userIdx[uId], movieIdx[mId]) elif not uId in userIdx.keys() and mId in movieIdx.keys(): predicted = np.mean([ratings[i] for i in np.where(X[:, 1] == movieIdx[mId])[0]]) elif uId in userIdx.keys() and not mId in movieIdx.keys(): predicted = np.mean([ratings[i] for i in np.where(X[:, 0] == userIdx[uId])[0]]) else: predicted = np.mean(ratings) error += pow(rating - predicted, 2) wr.writerow([uId, mId, predicted,time]) f.close() print("test RMSE:", np.sqrt(error))
_____no_output_____
MIT
B_DS2.ipynb
eunyul24/eunyul24.github.io
911 Calls Capstone Project - Solutions

For this capstone project we will be analyzing some 911 call data from [Kaggle](https://www.kaggle.com/mchirico/montcoalert). The data contains the following fields:

* lat : String variable, Latitude
* lng: String variable, Longitude
* desc: String variable, Description of the Emergency Call
* zip: String variable, Zipcode
* title: String variable, Title
* timeStamp: String variable, YYYY-MM-DD HH:MM:SS
* twp: String variable, Township
* addr: String variable, Address
* e: String variable, Dummy variable (always 1)

Just go along with this notebook and try to complete the instructions or answer the questions in bold using your Python and Data Science skills!

___

* Import numpy and Pandas
import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns
_____no_output_____
BSD-2-Clause
Exercises - Qasim/Python. Pandas, Viz/Capstone Project 1/911 Calls - o .ipynb
k21k/Python-Notes
* Import visualization libraries and set %matplotlib inline.
df = pd.read_csv('911.csv')
_____no_output_____
BSD-2-Clause
Exercises - Qasim/Python. Pandas, Viz/Capstone Project 1/911 Calls - o .ipynb
k21k/Python-Notes
* Read in the csv file as a dataframe called df
df.dtypes df.info() df.head(3)
_____no_output_____
BSD-2-Clause
Exercises - Qasim/Python. Pandas, Viz/Capstone Project 1/911 Calls - o .ipynb
k21k/Python-Notes
Short Questions

* What are the bottom 5 zipcodes for 911 calls?
df['zip'].value_counts().tail(5) df.head()
_____no_output_____
BSD-2-Clause
Exercises - Qasim/Python. Pandas, Viz/Capstone Project 1/911 Calls - o .ipynb
k21k/Python-Notes
* What are the top 5 townships (twp) for 911 calls?
df['twp'].value_counts().head(5)
_____no_output_____
BSD-2-Clause
Exercises - Qasim/Python. Pandas, Viz/Capstone Project 1/911 Calls - o .ipynb
k21k/Python-Notes
* Take a look at the 'title' column, how many unique title codes are there?
df['title'].nunique()
_____no_output_____
BSD-2-Clause
Exercises - Qasim/Python. Pandas, Viz/Capstone Project 1/911 Calls - o .ipynb
k21k/Python-Notes
Adding New Features

* In the titles column there are "Reasons/Departments" specified before the title code. These are EMS, Fire, and Traffic. Use .apply() with a custom lambda expression to create a new column called "Reason" that contains this string value.

* *For example, if the title column value is EMS: BACK PAINS/INJURY , the Reason column value would be EMS.*
df['Reason'] = df['title'].apply(lambda title: title.split(':')[0]) df.head()
_____no_output_____
BSD-2-Clause
Exercises - Qasim/Python. Pandas, Viz/Capstone Project 1/911 Calls - o .ipynb
k21k/Python-Notes
* Most common Reason for a 911 call based off of this new column?
# df3 = df2.value_counts() # df3.columns= 'count' df['Reason'].value_counts() sns.countplot(x='Reason',data=df,palette='viridis')
_____no_output_____
BSD-2-Clause
Exercises - Qasim/Python. Pandas, Viz/Capstone Project 1/911 Calls - o .ipynb
k21k/Python-Notes
___

* Now let us begin to focus on time information. What is the data type of the objects in the timeStamp column?
# Convert it to DateTime object df['timeStamp'] = pd.to_datetime(df['timeStamp']) df['Hour'] = df['timeStamp'].apply(lambda time: time.hour) df['Month'] = df['timeStamp'].apply(lambda time: time.month) df['Day of Week'] = df['timeStamp'].apply(lambda time: time.dayofweek) # map Day of week column according to the days in a week dmap = {0:'Mon',1:'Tue',2:'Wed',3:'Thu',4:'Fri',5:'Sat',6:'Sun'} df['Day of Week'] = df['Day of Week'].map(dmap) sns.countplot(x='Day of Week',data=df,hue='Reason',palette='viridis') # To relocate the legend plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) sns.countplot(x='Month',data=df,hue='Reason',palette='viridis') # To relocate the legend plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
_____no_output_____
BSD-2-Clause
Exercises - Qasim/Python. Pandas, Viz/Capstone Project 1/911 Calls - o .ipynb
k21k/Python-Notes
* You should have noticed it was missing some months. Let's see if we can fill in this information by plotting it in another way, possibly a simple line plot that fills in the missing months. In order to do this, we'll need to do some work with pandas...

* Now create a groupby object called byMonth, where you group the DataFrame by the month column and use the count() method for aggregation. Use the head() method on this returned DataFrame.
byMonth = df.groupby('Month').count() byMonth.head() # Simple line plot of any column of byMonth byMonth['twp'].plot() # Now see if you can use seaborn's lmplot() to create a linear fit # on the number of calls per month. Keep in mind you # may need to reset the index to a column. sns.lmplot(x='Month',y='twp',data=byMonth.reset_index()) # Create a new column Date in the df df['Date']=df['timeStamp'].apply(lambda t: t.date()) df.head()
_____no_output_____
BSD-2-Clause
Exercises - Qasim/Python. Pandas, Viz/Capstone Project 1/911 Calls - o .ipynb
k21k/Python-Notes
* Now groupby this Date column with the count() aggregate and create a plot of counts of 911 calls.
# use .plot() df.groupby('Date').count()['twp'].plot() plt.tight_layout()
_____no_output_____
BSD-2-Clause
Exercises - Qasim/Python. Pandas, Viz/Capstone Project 1/911 Calls - o .ipynb
k21k/Python-Notes
* Now recreate this plot but create 3 separate plots with each plot representing a Reason for the 911 call
# Traffic df[df['Reason']=='Traffic'].groupby('Date').count()['twp'].plot() plt.title('Traffic') plt.tight_layout() # Fire df[df['Reason']=='Fire'].groupby('Date').count()['twp'].plot() plt.title('Fire') plt.tight_layout() # EMS df[df['Reason']=='EMS'].groupby('Date').count()['twp'].plot() plt.title('EMS') plt.tight_layout()
_____no_output_____
BSD-2-Clause
Exercises - Qasim/Python. Pandas, Viz/Capstone Project 1/911 Calls - o .ipynb
k21k/Python-Notes
* Now let's move on to creating heatmaps with seaborn and our data. We'll first need to restructure the dataframe so that the columns become the Hours and the Index becomes the Day of the Week. There are lots of ways to do this, but I would recommend trying to combine groupby with an [unstack](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.unstack.html) method.
dayHour = df.groupby(by=['Day of Week','Hour']).count()['Reason'].unstack() dayHour.head() dayHour.head() plt.figure(figsize=(12,6)) sns.heatmap(dayHour) sns.clustermap(dayHour)
_____no_output_____
BSD-2-Clause
Exercises - Qasim/Python. Pandas, Viz/Capstone Project 1/911 Calls - o .ipynb
k21k/Python-Notes
* Now repeat these same plots and operations, for a DataFrame that shows the Month as the column.
dayMonth = df.groupby(by=['Day of Week','Month']).count()['Reason'].unstack() dayMonth.head() plt.figure(figsize=(12,6)) sns.heatmap(dayMonth) sns.clustermap(dayMonth)
_____no_output_____
BSD-2-Clause
Exercises - Qasim/Python. Pandas, Viz/Capstone Project 1/911 Calls - o .ipynb
k21k/Python-Notes
Let's Grow your Own Inner Core!

Choose a model in the list:
- geodyn_trg.TranslationGrowthRotation()
- geodyn_static.Hemispheres()

Choose a proxy type:
- age
- position
- phi
- theta
- growth rate

Set the parameters for the model: geodynModel.set_parameters(parameters)

Set the units: geodynModel.define_units()

Choose a data set:
- data.SeismicFromFile(filename) -- Lauren's data set
- data.RandomData(numbers_of_points)
- data.PerfectSamplingEquator(numbers_of_points) -- organized on a cartesian grid. numbers_of_points is the number of points along the x or y axis. The total number of points is numbers_of_points**2*pi/4. It has a special plot function to show streamlines: plot_c_vec(self, modelgeodyn)
- data.PerfectSamplingEquatorRadial(Nr, Ntheta) -- same as the previous one, but organized on a polar grid, not a cartesian grid.

Extract the info:
- calculate the proxy value for all points of the data set: geodyn.evaluate_proxy(data_set, geodynModel)
- extract the positions as numpy arrays: extract_rtp or extract_xyz
- calculate other variables: positions.angular_distance_to_point(t, p, t_point, p_point)

(A minimal end-to-end sketch of this workflow follows the imports below.)
%matplotlib inline # import statements import numpy as np import matplotlib.pyplot as plt #for figures from mpl_toolkits.basemap import Basemap #to render maps import math import json #to write dict with parameters from GrowYourIC import positions, geodyn, geodyn_trg, geodyn_static, plot_data, data plt.rcParams['figure.figsize'] = (8.0, 3.0) #size of figures cm = plt.cm.get_cmap('viridis') cm2 = plt.cm.get_cmap('winter')
/Users/marine/.python-eggs/GrowYourIC-0.5-py3.5.egg-tmp/GrowYourIC/data/CM2008_data.mat
MIT
notebooks/sandbox-grow.ipynb
MarineLasbleis/GrowYourIC
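To tie the list above together, here is a minimal end-to-end sketch of the workflow. It is an illustration added for this write-up, not one of the original cells, and it assumes `geodynModel` has already been configured with set_parameters() and define_units() exactly as in the cells that follow.

# Minimal workflow sketch (illustrative, not one of the original cells).
# Assumes `geodynModel` has been set up with set_parameters() and define_units()
# as shown in the cells below.
sketch_data = data.RandomData(50)              # small random data set
sketch_data.method = "bt_point"                # evaluate proxies at the bottom turning point
sketch_proxy = geodyn.evaluate_proxy(sketch_data, geodynModel, proxy_type="age", verbose=False)
r, t, p = sketch_data.extract_rtp("bottom_turning_point")   # radius, theta, phi as numpy arrays
dist = positions.angular_distance_to_point(t, p, 0., 100.)  # angular distance to the point (0, 100)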
Define the geodynamical model

Uncomment one of the models.
## un-comment one of them geodynModel = geodyn_trg.TranslationGrowthRotation() #can do all the models presented in the paper # geodynModel = geodyn_static.Hemispheres() #this is a static model, only hemispheres.
_____no_output_____
MIT
notebooks/sandbox-grow.ipynb
MarineLasbleis/GrowYourIC
Change the values of the parameters to get the model you want (here, parameters for .TranslationGrowthRotation())
age_ic_dim = 1e9 #in years rICB_dim = 1221. #in km v_g_dim = rICB_dim/age_ic_dim # in km/years #growth rate print("Growth rate is {:.2e} km/years".format(v_g_dim)) v_g_dim_seconds = v_g_dim*1e3/(np.pi*1e7) translation_velocity_dim = 0.8*v_g_dim_seconds#4e-10 #0.8*v_g_dim_seconds#4e-10 #m.s, value for today's Earth with Q_cmb = 10TW (see Alboussiere et al. 2010) time_translation = rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7) maxAge = 2.*time_translation/1e6 print("The translation recycles the inner core material in {0:.2e} million years".format(maxAge)) print("Translation velocity is {0:.2e} km/years".format(translation_velocity_dim*np.pi*1e7/1e3)) units = None #we give them already dimensionless parameters. rICB = 1. age_ic = 1. omega = 0.#0.5*np.pi/200e6*age_ic_dim#0.5*np.pi #0. #0.5*np.pi/200e6*age_ic_dim# 0.#0.5*np.pi#0.#0.5*np.pi/200e6*age_ic_dim #0. #-0.5*np.pi # Rotation rates has to be in ]-np.pi, np.pi[ print("Rotation rate is {:.2e}".format(omega)) velocity_amplitude = translation_velocity_dim*age_ic_dim*np.pi*1e7/rICB_dim/1e3 velocity_center = [0., 100.]#center of the eastern hemisphere velocity = geodyn_trg.translation_velocity(velocity_center, velocity_amplitude) exponent_growth = 1.#0.1#1 print(v_g_dim, velocity_amplitude, omega/age_ic_dim*180/np.pi*1e6)
Growth rate is 1.22e-06 km/years The translation recycles the inner core material in 2.50e+03 million years Translation velocity is 9.77e-07 km/years Rotation rate is 0.00e+00 1.221e-06 0.7999999999999999 0.0
MIT
notebooks/sandbox-grow.ipynb
MarineLasbleis/GrowYourIC
Define a proxy type, and a proxy name (to be used in the figures to annotate the axes)

You can re-define it later if you want (or define another proxy_type2 if needed).
proxy_type = "age"#"growth rate" proxy_name = "age (Myears)" #growth rate (km/Myears)" proxy_lim = [0, maxAge] #or None #proxy_lim = None fig_name = "figures/test_" #to name the figures print(rICB, age_ic, velocity_amplitude, omega, exponent_growth, proxy_type) print(velocity)
1.0 1.0 0.7999999999999999 0.0 1.0 age [ -1.38918542e-01 7.87846202e-01 4.89858720e-17]
MIT
notebooks/sandbox-grow.ipynb
MarineLasbleis/GrowYourIC
Parameters for the geodynamical model

This will input the different parameters into the model.
parameters = dict({'units': units, 'rICB': rICB, 'tau_ic':age_ic, 'vt': velocity, 'exponent_growth': exponent_growth, 'omega': omega, 'proxy_type': proxy_type}) geodynModel.set_parameters(parameters) geodynModel.define_units() param = parameters param['vt'] = parameters['vt'].tolist() #for json serialization # write file with parameters, readable with json, byt also human-readable with open(fig_name+'parameters.json', 'w') as f: json.dump(param, f) print(parameters)
{'exponent_growth': 1.0, 'vt': [-0.13891854213354424, 0.7878462024097663, 4.8985871965894125e-17], 'proxy_type': 'age', 'omega': 0.0, 'tau_ic': 1.0, 'units': None, 'rICB': 1.0}
MIT
notebooks/sandbox-grow.ipynb
MarineLasbleis/GrowYourIC
Different data sets and visualisations

Perfect sampling at the equator (to visualise the flow lines)

You can add more points to get better precision.
npoints = 10 #number of points in the x direction for the data set. data_set = data.PerfectSamplingEquator(npoints, rICB = 1.) data_set.method = "bt_point" proxy = geodyn.evaluate_proxy(data_set, geodynModel, proxy_type="age", verbose = False) data_set.plot_c_vec(geodynModel, proxy=proxy, cm=cm, nameproxy="age (Myears)") plt.savefig(fig_name+"equatorial_plot.pdf", bbox_inches='tight')
=== == Evaluate value of proxy for all points of the data set = Geodynamic model is Translation, Rotation and Growth = Proxy is age = Data set is Perfect sampling in the equatorial plane = Proxy is evaluated for bt_point = Number of points to examine: 60 ===
MIT
notebooks/sandbox-grow.ipynb
MarineLasbleis/GrowYourIC
Perfect sampling in the first 100km (to visualise the depth evolution)
data_meshgrid = data.Equator_upperpart(10,10) data_meshgrid.method = "bt_point" proxy_meshgrid = geodyn.evaluate_proxy(data_meshgrid, geodynModel, proxy_type=proxy_type, verbose = False) #r, t, p = data_meshgrid.extract_rtp("bottom_turning_point") fig3, ax3 = plt.subplots(figsize=(8, 2)) X, Y, Z = data_meshgrid.mesh_RPProxy(proxy_meshgrid) sc = ax3.contourf(Y, rICB_dim*(1.-X), Z, 100, cmap=cm) sc2 = ax3.contour(sc, levels=sc.levels[::15], colors = "k") ax3.set_ylim(-0, 120) fig3.gca().invert_yaxis() ax3.set_xlim(-180,180) cbar = fig3.colorbar(sc) #cbar.set_clim(0, maxAge) cbar.set_label(proxy_name) ax3.set_xlabel("longitude") ax3.set_ylabel("depth below ICB (km)") plt.savefig(fig_name+"meshgrid.pdf", bbox_inches='tight') npoints = 20 #number of points in the x direction for the data set. data_set = data.PerfectSamplingSurface(npoints, rICB = 1., depth=0.01) data_set.method = "bt_point" proxy_surface = geodyn.evaluate_proxy(data_set, geodynModel, proxy_type=proxy_type, verbose = False) #r, t, p = data_set.extract_rtp("bottom_turning_point") X, Y, Z = data_set.mesh_TPProxy(proxy_surface) ## map m, fig = plot_data.setting_map() y, x = m(Y, X) sc = m.contourf(y, x, Z, 30, cmap=cm, zorder=2, edgecolors='none') plt.title("Dataset: {},\n geodynamic model: {}".format(data_set.name, geodynModel.name)) cbar = plt.colorbar(sc) cbar.set_label(proxy_name) fig.savefig(fig_name+"map_surface.pdf", bbox_inches='tight')
=== == Evaluate value of proxy for all points of the data set = Geodynamic model is Translation, Rotation and Growth = Proxy is age = Data set is Perfect sampling at the surface = Proxy is evaluated for bt_point = Number of points to examine: 400 ===
MIT
notebooks/sandbox-grow.ipynb
MarineLasbleis/GrowYourIC
Random data set, in the first 100 km - bottom turning point only

Calculate the data
# random data set data_set_random = data.RandomData(300) data_set_random.method = "bt_point" proxy_random = geodyn.evaluate_proxy(data_set_random, geodynModel, proxy_type=proxy_type, verbose=False) data_path = "../GrowYourIC/data/" geodynModel.data_path = data_path if proxy_type == "age": # ## domain size and Vp proxy_random_size = geodyn.evaluate_proxy(data_set_random, geodynModel, proxy_type="domain_size", verbose=False) proxy_random_dV = geodyn.evaluate_proxy(data_set_random, geodynModel, proxy_type="dV_V", verbose=False) r, t, p = data_set_random.extract_rtp("bottom_turning_point") dist = positions.angular_distance_to_point(t, p, *velocity_center) ## map m, fig = plot_data.setting_map() x, y = m(p, t) sc = m.scatter(x, y, c=proxy_random,s=8, zorder=10, cmap=cm, edgecolors='none') plt.title("Dataset: {},\n geodynamic model: {}".format(data_set_random.name, geodynModel.name)) cbar = plt.colorbar(sc) cbar.set_label(proxy_name) fig.savefig(fig_name+data_set_random.shortname+"_map.pdf", bbox_inches='tight') ## phi and distance plots fig, ax = plt.subplots(2,2, figsize=(8.0, 5.0)) sc1 = ax[0,0].scatter(p, proxy_random, c=abs(t),s=3, cmap=cm2, vmin =-0, vmax =90, linewidth=0) phi = np.linspace(-180,180, 50) #analytic_equator = np.maximum(2*np.sin((phi-10)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.) #ax[0,0].plot(phi,analytic_equator, 'r', linewidth=2) ax[0,0].set_xlabel("longitude") ax[0,0].set_ylabel(proxy_name) if proxy_lim is not None: ax[0,0].set_ylim(proxy_lim) sc2 = ax[0,1].scatter(dist, proxy_random, c=abs(t), cmap=cm2, vmin=-0, vmax =90, s=3, linewidth=0) ax[0,1].set_xlabel("angular distance to ({}, {})".format(*velocity_center)) phi = np.linspace(-90,90, 100) if proxy_type == "age": analytic_equator = np.maximum(2*np.sin((phi-10)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.) ax[0,0].plot(phi,analytic_equator, 'r', linewidth=2) analytic_equator = np.maximum(2*np.sin((-phi)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.) ax[0,1].plot(phi+90,analytic_equator, 'r', linewidth=2) ax[0,1].set_xlim([0,180]) ax[0,0].set_xlim([-180,180]) cbar = fig.colorbar(sc1) cbar.set_label("longitude: abs(theta)") if proxy_lim is not None: ax[0,1].set_ylim(proxy_lim) ## figure with domain size and Vp if proxy_type == "age": sc3 = ax[1,0].scatter(dist, proxy_random_size, c=abs(t), cmap=cm2, vmin =-0, vmax =90, s=3, linewidth=0) ax[1,0].set_xlabel("angular distance to ({}, {})".format(*velocity_center)) ax[1,0].set_ylabel("domain size (m)") ax[1,0].set_xlim([0,180]) ax[1,0].set_ylim([0, 2500.000]) sc4 = ax[1,1].scatter(dist, proxy_random_dV, c=abs(t), cmap=cm2, vmin=-0, vmax =90, s=3, linewidth=0) ax[1,1].set_xlabel("angular distance to ({}, {})".format(*velocity_center)) ax[1,1].set_ylabel("dV/V") ax[1,1].set_xlim([0,180]) ax[1,1].set_ylim([-0.017, -0.002]) fig.savefig(fig_name +data_set_random.shortname+ '_long_dist.pdf', bbox_inches='tight') fig, ax = plt.subplots(figsize=(8, 2)) sc=ax.scatter(p,rICB_dim*(1.-r), c=proxy_random, s=10,cmap=cm, linewidth=0) ax.set_ylim(-0,120) fig.gca().invert_yaxis() ax.set_xlim(-180,180) cbar = fig.colorbar(sc) if proxy_lim is not None: cbar.set_clim(0, maxAge) ax.set_xlabel("longitude") ax.set_ylabel("depth below ICB (km)") cbar.set_label(proxy_name) fig.savefig(fig_name+data_set_random.shortname+"_depth.pdf", bbox_inches='tight')
_____no_output_____
MIT
notebooks/sandbox-grow.ipynb
MarineLasbleis/GrowYourIC
Real Data set from Waszek paper
## real data set data_set = data.SeismicFromFile("../GrowYourIC/data/WD11.dat") data_set.method = "bt_point" proxy2 = geodyn.evaluate_proxy(data_set, geodynModel, proxy_type=proxy_type, verbose=False) if proxy_type == "age": ## domain size and DV/V proxy_size = geodyn.evaluate_proxy(data_set, geodynModel, proxy_type="domain_size", verbose=False) proxy_dV = geodyn.evaluate_proxy(data_set, geodynModel, proxy_type="dV_V", verbose=False) r, t, p = data_set.extract_rtp("bottom_turning_point") dist = positions.angular_distance_to_point(t, p, *velocity_center) ## map m, fig = plot_data.setting_map() x, y = m(p, t) sc = m.scatter(x, y, c=proxy2,s=8, zorder=10, cmap=cm, edgecolors='none') plt.title("Dataset: {},\n geodynamic model: {}".format(data_set.name, geodynModel.name)) cbar = plt.colorbar(sc) cbar.set_label(proxy_name) fig.savefig(fig_name+data_set.shortname+"_map.pdf", bbox_inches='tight') ## phi and distance plots fig, ax = plt.subplots(2,2, figsize=(8.0, 5.0)) sc1 = ax[0,0].scatter(p, proxy2, c=abs(t),s=3, cmap=cm2, vmin =-0, vmax =90, linewidth=0) phi = np.linspace(-180,180, 50) #analytic_equator = np.maximum(2*np.sin((phi-10)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.) #ax[0,0].plot(phi,analytic_equator, 'r', linewidth=2) ax[0,0].set_xlabel("longitude") ax[0,0].set_ylabel(proxy_name) if proxy_lim is not None: ax[0,0].set_ylim(proxy_lim) sc2 = ax[0,1].scatter(dist, proxy2, c=abs(t), cmap=cm2, vmin=-0, vmax =90, s=3, linewidth=0) ax[0,1].set_xlabel("angular distance to ({}, {})".format(*velocity_center)) phi = np.linspace(-90,90, 100) if proxy_type == "age": analytic_equator = np.maximum(2*np.sin((-phi)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.) ax[0,1].plot(phi+90,analytic_equator, 'r', linewidth=2) analytic_equator = np.maximum(2*np.sin((phi-10)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.) ax[0,0].plot(phi,analytic_equator, 'r', linewidth=2) ax[0,1].set_xlim([0,180]) ax[0,0].set_xlim([-180,180]) cbar = fig.colorbar(sc1) cbar.set_label("longitude: abs(theta)") if proxy_lim is not None: ax[0,1].set_ylim(proxy_lim) ## figure with domain size and Vp if proxy_type == "age": sc3 = ax[1,0].scatter(dist, proxy_size, c=abs(t), cmap=cm2, vmin =-0, vmax =90, s=3, linewidth=0) ax[1,0].set_xlabel("angular distance to ({}, {})".format(*velocity_center)) ax[1,0].set_ylabel("domain size (m)") ax[1,0].set_xlim([0,180]) ax[1,0].set_ylim([0, 2500.000]) sc4 = ax[1,1].scatter(dist, proxy_dV, c=abs(t), cmap=cm2, vmin=-0, vmax =90, s=3, linewidth=0) ax[1,1].set_xlabel("angular distance to ({}, {})".format(*velocity_center)) ax[1,1].set_ylabel("dV/V") ax[1,1].set_xlim([0,180]) ax[1,1].set_ylim([-0.017, -0.002]) fig.savefig(fig_name + data_set.shortname+'_long_dist.pdf', bbox_inches='tight') fig, ax = plt.subplots(figsize=(8, 2)) sc=ax.scatter(p,rICB_dim*(1.-r), c=proxy2, s=10,cmap=cm, linewidth=0) ax.set_ylim(-0,120) fig.gca().invert_yaxis() ax.set_xlim(-180,180) cbar = fig.colorbar(sc) if proxy_lim is not None: cbar.set_clim(0, maxAge) ax.set_xlabel("longitude") ax.set_ylabel("depth below ICB (km)") cbar.set_label(proxy_name) fig.savefig(fig_name+data_set.shortname+"_depth.pdf", bbox_inches='tight')
_____no_output_____
MIT
notebooks/sandbox-grow.ipynb
MarineLasbleis/GrowYourIC
Running the Screaming Frog SEO Spider bot in the cloud via Google Colab
-------------

> *Protip: for a large site, the best fit is a High-RAM (25GB) instance without GPU/TPU, available with the PRO subscription*

Cosmetic improvement: add line wrapping for long single-line commands
from IPython.display import HTML, display def set_css(): display(HTML(''' <style> pre { white-space: pre-wrap; } </style> ''')) get_ipython().events.register('pre_run_cell', set_css)
_____no_output_____
MIT
Running_screamingfrog_SEO_spider_in_Colab_notebook.ipynb
danzerzine/seospider-colab
Mount the Google Drive that stores the bot's configs and where the crawl results will be saved
from google.colab import drive drive.mount('/content/drive')
_____no_output_____
MIT
Running_screamingfrog_SEO_spider_in_Colab_notebook.ipynb
danzerzine/seospider-colab
Find out the instance's external IP so that it can then be added by hand to the Cloudflare firewall exceptions -- otherwise we will hit the rate limit very quickly and start being served the human-verification page
!wget -qO- http://ipecho.net/plain | xargs echo && wget -qO - icanhazip.com
_____no_output_____
MIT
Running_screamingfrog_SEO_spider_in_Colab_notebook.ipynb
danzerzine/seospider-colab
Install the latest version of SEO Spider and take care of some minor housekeeping

* Update the installed Linux packages
* Copy the settings from the desktop version of SEO Spider into the instance's local folder (this is needed to carry over the authorization tokens for Google Search Console, GA, and so on)
#@title Settings directory on GDrive { vertical-output: true, display-mode: "both" }
settings_path = "" #@param {type:"string"}

!wget https://download.screamingfrog.co.uk/products/seo-spider/screamingfrogseospider_16.3_all.deb
# apt needs the ./ prefix to treat the argument as a local .deb file rather than a package name
!sudo apt-get install -y ./screamingfrogseospider_16.3_all.deb
!sudo apt-get update && sudo apt-get upgrade -y
!mkdir -p ~/.ScreamingFrogSEOSpider
!cp -r $settings_path/* ~/.ScreamingFrogSEOSpider
_____no_output_____
MIT
Running_screamingfrog_SEO_spider_in_Colab_notebook.ipynb
danzerzine/seospider-colab
Run the bash script that finishes setting up the instance and the bot. It will add a virtual display for the Java output, switch the bot to storing crawl results on disk instead of in RAM, and so on. (A hedged sketch of launching a headless crawl follows the cell below.)
!wget https://raw.githubusercontent.com/fili/screaming-frog-on-google-compute-engine/master/gce-sf.sh -O install.sh && chmod +x install.sh && source ./install.sh
_____no_output_____
MIT
Running_screamingfrog_SEO_spider_in_Colab_notebook.ipynb
danzerzine/seospider-colab
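With the instance prepared, the crawler itself is typically launched from the command line in headless mode. The cell below is a hedged sketch rather than part of the original notebook: the target URL and output folder are placeholders, and the CLI flags shown (--crawl, --headless, --save-crawl, --output-folder, --export-tabs) should be checked against the documentation for your SEO Spider version.

#@title Example headless crawl (illustrative sketch, not from the original notebook)
# The URL and the output folder are placeholders; verify the CLI flags against your SEO Spider version.
!screamingfrogseospider --crawl https://example.com --headless --save-crawl --output-folder /content/drive/MyDrive/seo-crawls --export-tabs "Internal:All"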