| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
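Each row below is one fill-in-the-middle (FIM) sample: concatenating `prefix` + `middle` + `suffix` reproduces the original source text, and `fim_type` records what kind of span was held out (the rows here show `conditional_block`, `identifier_body`, `identifier_name`, and `random_line_split`). The sketch below shows how a row can be reassembled and turned into a prefix-suffix-middle training prompt; the dict values are truncated placeholders taken from the first row, and the sentinel tokens are illustrative assumptions rather than anything defined by this dump.

```python
# Minimal sketch: reassemble one FIM row and build a PSM-style prompt.
# The cell values are truncated placeholders ("..." marks omitted text);
# the <fim_*> sentinel tokens are assumptions, not part of the dataset.
row = {
    "file_name": "tax_task.py",
    "prefix": "... data_train = shuffle(d",    # text before the held-out span
    "middle": "ata_train) ...",                # the held-out span itself
    "suffix": "... if prob >= 0.5: ...",       # text after the held-out span
    "fim_type": "conditional_block",
}

def reassemble(row: dict) -> str:
    # prefix + middle + suffix reproduces the original file contents
    return row["prefix"] + row["middle"] + row["suffix"]

def to_psm_prompt(row: dict,
                  pre: str = "<fim_prefix>",
                  suf: str = "<fim_suffix>",
                  mid: str = "<fim_middle>") -> str:
    # Prefix-Suffix-Middle ordering: the model is shown prefix and suffix
    # and generates the middle after the final sentinel.
    return f"{pre}{row['prefix']}{suf}{row['suffix']}{mid}{row['middle']}"

print(reassemble(row))
print(to_psm_prompt(row))
```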
tax_task.py | _true', help='use position embed or not')
parser.add_argument('--position_embed_size', type=int, default=100, help='position embed size')
parser.add_argument('--position_embed_mode', type=str, default='sum', choices=['sum','concat'], help='position embed mode[sum,concat]')
parser.add_argument('--self_attention_units', type=int, default=64, help='self attention units')
parser.add_argument('--self_attention_num_heads', type=int, default=4, help='self attention num heads')
parser.add_argument('--no_history', action='store_true', help='use history attention or not')
parser.add_argument('--no_interaction', action='store_true', help='use interaction attention or not')
parser.add_argument('--no_memory', action='store_true', help='remove memory or not')
parser.add_argument('--memory_word_num', type=int, default=256, help='memory word num')
parser.add_argument('--memory_word_size', type=int, default=64, help='memory word size')
parser.add_argument('--memory_read_heads', type=int, default=4, help='memory read heads')
parser.add_argument('--feature_size', type=int, default=256, help='feature size')
parser.add_argument('--multi', action='store_true', help='multi-label classification or not')
parser.add_argument('--epochs', type=int, default=10, help='epochs')
parser.add_argument('--focal_loss', action='store_false', help='use focal loss')
parser.add_argument('--focal_loss_alpha', type=float, default=0.6, help='focal loss alpha')
parser.add_argument('--focal_loss_gamma', type=float, default=2.0, help='focal loss gamma')
parser.add_argument('--optimizer', type=str, default='adam', help='optimizer')
parser.add_argument('--lr', type=float, default=0.00005, help='learning rate')
parser.add_argument('--lr_decay', type=float, default=1e-6, help='learning rate decay')
parser.add_argument('--model_path', type=str, help='model path')
args = parser.parse_args()
model_name = "AMANet-tax"
# time
time_str = time.strftime("%Y%m%d%H%M%S", time.localtime())
def write_log(callback, names, logs, epoch_no):
for name, value in zip(names, logs):
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
callback.writer.add_summary(summary, epoch_no)
callback.writer.flush()
CallBack = TensorBoard(log_dir=('../tb-logs/tax-task/%s/%s' %(model_name, time_str)), # log dir
histogram_freq=0,
write_graph=True,
write_grads=True,
write_images=True,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None)
train_names = ['train_loss']
val_names = ["val_acc", "val_prec", "val_recall", "val_f1", "val_prauc", "val_roc_auc"]
def train(config):
# model save path
model_save_dir = os.path.join("../model/tax-task", model_name, time_str)
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
# log save path
log_save_dir = os.path.join("../logs/tax-task", model_name, time_str)
if not os.path.exists(log_save_dir):
os.makedirs(log_save_dir)
# load data
data_train, data_valid, data_test, voc_size = load_tax_data(config["datapath"])
# input1 vocab size
config["vocab_size1"] = voc_size[0]
# input1 vocab size
config["vocab_size2"] = voc_size[1]
# output vocab size
config["output_size"] = voc_size[2]
# build model
model = build(config)
# plot model graph
model_graph_file = os.path.join(model_save_dir, ("model_%s.png" % time_str))
plot_model(model, to_file=model_graph_file)
# model summary
model.summary()
# model tensorboard logs
CallBack.set_model(model)
# eval logs
file = open(os.path.join(log_save_dir, "statistic_%s.txt" % time_str), "w+")
file.write(str(config)+"\n")
model.summary(print_fn=lambda x: file.write(x + '\n'))
train_size = len(data_train)
best_f1 = 0.0
best_epoch = 0
best_model = ""
# train
for epoch in range(config["epochs"]):
# new iteration: shuffle the training set
data_train = shuffle(d | if prob >= 0.5:
train_pred_output.append(1)
else:
train_pred_output.append(0)
end_time = time.time()
elapsed_time = (end_time - start_time) / 60
train_acc, train_prec, train_recall, train_f1 = metrics_non_multi(train_real_output, train_pred_output)
train_roc_auc = roc_auc_non_multi(train_real_output, train_pred_output_prob)
train_prauc = prc_auc_non_multi(train_real_output, train_pred_output_prob)
if config["use_tensorboard"]:
train_logs = [sum(losses)/len(losses)]
write_log(CallBack, train_names, train_logs, epoch+1)
print('')
acc, pre, recall, f1, prauc, roc_auc = model_eval(model, data_valid, config)
if config["use_tensorboard"]:
val_logs = [acc, pre, recall, f1, prauc, roc_auc]
write_log(CallBack, val_names, val_logs, epoch+1)
file.write("spend time to train: %.2f min\n" % elapsed_time)
file.write("train loss: %f\n" % (sum(losses)/ len(losses)))
file.write("valid acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f\n" % (acc, pre, recall, f1, prauc, roc_auc))
print("spend time to train: %.2f min" % elapsed_time)
print("train loss: %f, acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f" % ((sum(losses)/ len(losses)), train_acc, train_prec, train_recall, train_f1, train_prauc, train_roc_auc))
print("valid acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f" % (acc, pre, recall, f1, prauc, roc_auc))
model_save_path = os.path.join(model_save_dir, 'model_%d_%s_%.4f.h5' % ((epoch+1), time_str, f1))
model.save(model_save_path)
if best_f1 < f1:
best_f1 = f1
best_epoch = epoch + 1
best_model = model_save_path
acc, pre, recall, f1, prauc, roc_auc = model_eval(model, data_test, config, type="test")
print("test acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f" % (acc, pre, recall, f1, prauc, roc_auc))
file.write("test acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f\n" % (acc, pre, recall, f1, prauc, roc_auc))
file.write("###############################################################\n")
print("###############################################################\n")
file.flush()
os.rename(best_model, best_model.replace(".h5", "_best.h5"))
print("train done. best epoch: %d, best: f1: %f, model path: %s" % (best_epoch, best_f1, best_model))
file.write("train done. best epoch: %d, best: f1: %f, model path: %s\n" % (best_epoch, best_f1, best_model))
CallBack.on_train_end(None)
file.close()
# evaluate
def model_eval(model, dataset, config, type="eval"):
| ata_train)
start_time = time.time()
llprint("Epoch %d/%d\n" % (epoch + 1, config["epochs"]))
losses = []
train_pred_output_prob = []
train_pred_output = []
train_real_output = []
file.write("Epoch: %d/%d\n" % ((epoch + 1), config["epochs"]))
for patient_index in range(train_size):
llprint("\rBatch %d/%d" % (patient_index + 1, train_size))
# get the dual sequence of the index-th enterprise
input_vec1, input_vec2, output_vec, o = prepare_tax_dual(data_train, index=patient_index)
train_real_output.append(o[0])
res = model.train_on_batch([input_vec1, input_vec2], output_vec)
losses.append(res[0])
prob = res[1][0][0]
train_pred_output_prob.append(prob) | conditional_block |
tax_task.py | _true', help='use position embed or not')
parser.add_argument('--position_embed_size', type=int, default=100, help='position embed size')
parser.add_argument('--position_embed_mode', type=str, default='sum', choices=['sum','concat'], help='position embed mode[sum,concat]')
parser.add_argument('--self_attention_units', type=int, default=64, help='self attention units')
parser.add_argument('--self_attention_num_heads', type=int, default=4, help='self attention num heads')
parser.add_argument('--no_history', action='store_true', help='use history attention or not')
parser.add_argument('--no_interaction', action='store_true', help='use interaction attention or not')
parser.add_argument('--no_memory', action='store_true', help='remove memory or not')
parser.add_argument('--memory_word_num', type=int, default=256, help='memory word num')
parser.add_argument('--memory_word_size', type=int, default=64, help='memory word size')
parser.add_argument('--memory_read_heads', type=int, default=4, help='memory read heads')
parser.add_argument('--feature_size', type=int, default=256, help='feature size')
parser.add_argument('--multi', action='store_true', help='multi-label classification or not')
parser.add_argument('--epochs', type=int, default=10, help='epochs')
parser.add_argument('--focal_loss', action='store_false', help='use focal loss')
parser.add_argument('--focal_loss_alpha', type=float, default=0.6, help='focal loss alpha')
parser.add_argument('--focal_loss_gamma', type=float, default=2.0, help='focal loss gamma')
parser.add_argument('--optimizer', type=str, default='adam', help='optimizer')
parser.add_argument('--lr', type=float, default=0.00005, help='learning rate')
parser.add_argument('--lr_decay', type=float, default=1e-6, help='learning rate decay')
parser.add_argument('--model_path', type=str, help='model path')
args = parser.parse_args()
model_name = "AMANet-tax"
# time
time_str = time.strftime("%Y%m%d%H%M%S", time.localtime())
def write_log(callback, names, logs, epoch_no):
|
CallBack = TensorBoard(log_dir=('../tb-logs/tax-task/%s/%s' %(model_name, time_str)), # log dir
histogram_freq=0,
write_graph=True,
write_grads=True,
write_images=True,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None)
train_names = ['train_loss']
val_names = ["val_acc", "val_prec", "val_recall", "val_f1", "val_prauc", "val_roc_auc"]
def train(config):
# model save path
model_save_dir = os.path.join("../model/tax-task", model_name, time_str)
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
# log save path
log_save_dir = os.path.join("../logs/tax-task", model_name, time_str)
if not os.path.exists(log_save_dir):
os.makedirs(log_save_dir)
# load data
data_train, data_valid, data_test, voc_size = load_tax_data(config["datapath"])
# input1 vocab size
config["vocab_size1"] = voc_size[0]
# input1 vocab size
config["vocab_size2"] = voc_size[1]
# output vocab size
config["output_size"] = voc_size[2]
# build model
model = build(config)
# plot model graph
model_graph_file = os.path.join(model_save_dir, ("model_%s.png" % time_str))
plot_model(model, to_file=model_graph_file)
# model summary
model.summary()
# model tensorboard logs
CallBack.set_model(model)
# eval logs
file = open(os.path.join(log_save_dir, "statistic_%s.txt" % time_str), "w+")
file.write(str(config)+"\n")
model.summary(print_fn=lambda x: file.write(x + '\n'))
train_size = len(data_train)
best_f1 = 0.0
best_epoch = 0
best_model = ""
# train
for epoch in range(config["epochs"]):
# new iteration: shuffle the training set
data_train = shuffle(data_train)
start_time = time.time()
llprint("Epoch %d/%d\n" % (epoch + 1, config["epochs"]))
losses = []
train_pred_output_prob = []
train_pred_output = []
train_real_output = []
file.write("Epoch: %d/%d\n" % ((epoch + 1), config["epochs"]))
for patient_index in range(train_size):
llprint("\rBatch %d/%d" % (patient_index + 1, train_size))
# get the dual sequence of the index-th enterprise
input_vec1, input_vec2, output_vec, o = prepare_tax_dual(data_train, index=patient_index)
train_real_output.append(o[0])
res = model.train_on_batch([input_vec1, input_vec2], output_vec)
losses.append(res[0])
prob = res[1][0][0]
train_pred_output_prob.append(prob)
if prob >= 0.5:
train_pred_output.append(1)
else:
train_pred_output.append(0)
end_time = time.time()
elapsed_time = (end_time - start_time) / 60
train_acc, train_prec, train_recall, train_f1 = metrics_non_multi(train_real_output, train_pred_output)
train_roc_auc = roc_auc_non_multi(train_real_output, train_pred_output_prob)
train_prauc = prc_auc_non_multi(train_real_output, train_pred_output_prob)
if config["use_tensorboard"]:
train_logs = [sum(losses)/len(losses)]
write_log(CallBack, train_names, train_logs, epoch+1)
print('')
acc, pre, recall, f1, prauc, roc_auc = model_eval(model, data_valid, config)
if config["use_tensorboard"]:
val_logs = [acc, pre, recall, f1, prauc, roc_auc]
write_log(CallBack, val_names, val_logs, epoch+1)
file.write("spend time to train: %.2f min\n" % elapsed_time)
file.write("train loss: %f\n" % (sum(losses)/ len(losses)))
file.write("valid acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f\n" % (acc, pre, recall, f1, prauc, roc_auc))
print("spend time to train: %.2f min" % elapsed_time)
print("train loss: %f, acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f" % ((sum(losses)/ len(losses)), train_acc, train_prec, train_recall, train_f1, train_prauc, train_roc_auc))
print("valid acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f" % (acc, pre, recall, f1, prauc, roc_auc))
model_save_path = os.path.join(model_save_dir, 'model_%d_%s_%.4f.h5' % ((epoch+1), time_str, f1))
model.save(model_save_path)
if best_f1 < f1:
best_f1 = f1
best_epoch = epoch + 1
best_model = model_save_path
acc, pre, recall, f1, prauc, roc_auc = model_eval(model, data_test, config, type="test")
print("test acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f" % (acc, pre, recall, f1, prauc, roc_auc))
file.write("test acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f\n" % (acc, pre, recall, f1, prauc, roc_auc))
file.write("###############################################################\n")
print("###############################################################\n")
file.flush()
os.rename(best_model, best_model.replace(".h5", "_best.h5"))
print("train done. best epoch: %d, best: f1: %f, model path: %s" % (best_epoch, best_f1, best_model))
file.write("train done. best epoch: %d, best: f1: %f, model path: %s\n" % (best_epoch, best_f1, best_model))
CallBack.on_train_end(None)
file.close()
# evaluate
def model_eval(model, dataset, config, type="eval"):
eval | for name, value in zip(names, logs):
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
callback.writer.add_summary(summary, epoch_no)
callback.writer.flush() | identifier_body |
tax_task.py | -label classification or not')
parser.add_argument('--epochs', type=int, default=10, help='epochs')
parser.add_argument('--focal_loss', action='store_false', help='use focal loss')
parser.add_argument('--focal_loss_alpha', type=float, default=0.6, help='focal loss alpha')
parser.add_argument('--focal_loss_gamma', type=float, default=2.0, help='focal loss gamma')
parser.add_argument('--optimizer', type=str, default='adam', help='optimizer')
parser.add_argument('--lr', type=float, default=0.00005, help='learning rate')
parser.add_argument('--lr_decay', type=float, default=1e-6, help='learning rate decay')
parser.add_argument('--model_path', type=str, help='model path')
args = parser.parse_args()
model_name = "AMANet-tax"
# time
time_str = time.strftime("%Y%m%d%H%M%S", time.localtime())
def write_log(callback, names, logs, epoch_no):
for name, value in zip(names, logs):
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
callback.writer.add_summary(summary, epoch_no)
callback.writer.flush()
CallBack = TensorBoard(log_dir=('../tb-logs/tax-task/%s/%s' %(model_name, time_str)), # log dir
histogram_freq=0,
write_graph=True,
write_grads=True,
write_images=True,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None)
train_names = ['train_loss']
val_names = ["val_acc", "val_prec", "val_recall", "val_f1", "val_prauc", "val_roc_auc"]
def train(config):
# model save path
model_save_dir = os.path.join("../model/tax-task", model_name, time_str)
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
# log save path
log_save_dir = os.path.join("../logs/tax-task", model_name, time_str)
if not os.path.exists(log_save_dir):
os.makedirs(log_save_dir)
# load data
data_train, data_valid, data_test, voc_size = load_tax_data(config["datapath"])
# input1 vocab size
config["vocab_size1"] = voc_size[0]
# input1 vocab size
config["vocab_size2"] = voc_size[1]
# output vocab size
config["output_size"] = voc_size[2]
# build model
model = build(config)
# plot model graph
model_graph_file = os.path.join(model_save_dir, ("model_%s.png" % time_str))
plot_model(model, to_file=model_graph_file)
# model summary
model.summary()
# model tensorboard logs
CallBack.set_model(model)
# eval logs
file = open(os.path.join(log_save_dir, "statistic_%s.txt" % time_str), "w+")
file.write(str(config)+"\n")
model.summary(print_fn=lambda x: file.write(x + '\n'))
train_size = len(data_train)
best_f1 = 0.0
best_epoch = 0
best_model = ""
# train
for epoch in range(config["epochs"]):
# new iteration: shuffle the training set
data_train = shuffle(data_train)
start_time = time.time()
llprint("Epoch %d/%d\n" % (epoch + 1, config["epochs"]))
losses = []
train_pred_output_prob = []
train_pred_output = []
train_real_output = []
file.write("Epoch: %d/%d\n" % ((epoch + 1), config["epochs"]))
for patient_index in range(train_size):
llprint("\rBatch %d/%d" % (patient_index + 1, train_size))
# get the dual sequence of the index-th enterprise
input_vec1, input_vec2, output_vec, o = prepare_tax_dual(data_train, index=patient_index)
train_real_output.append(o[0])
res = model.train_on_batch([input_vec1, input_vec2], output_vec)
losses.append(res[0])
prob = res[1][0][0]
train_pred_output_prob.append(prob)
if prob >= 0.5:
train_pred_output.append(1)
else:
train_pred_output.append(0)
end_time = time.time()
elapsed_time = (end_time - start_time) / 60
train_acc, train_prec, train_recall, train_f1 = metrics_non_multi(train_real_output, train_pred_output)
train_roc_auc = roc_auc_non_multi(train_real_output, train_pred_output_prob)
train_prauc = prc_auc_non_multi(train_real_output, train_pred_output_prob)
if config["use_tensorboard"]:
train_logs = [sum(losses)/len(losses)]
write_log(CallBack, train_names, train_logs, epoch+1)
print('')
acc, pre, recall, f1, prauc, roc_auc = model_eval(model, data_valid, config)
if config["use_tensorboard"]:
val_logs = [acc, pre, recall, f1, prauc, roc_auc]
write_log(CallBack, val_names, val_logs, epoch+1)
file.write("spend time to train: %.2f min\n" % elapsed_time)
file.write("train loss: %f\n" % (sum(losses)/ len(losses)))
file.write("valid acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f\n" % (acc, pre, recall, f1, prauc, roc_auc))
print("spend time to train: %.2f min" % elapsed_time)
print("train loss: %f, acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f" % ((sum(losses)/ len(losses)), train_acc, train_prec, train_recall, train_f1, train_prauc, train_roc_auc))
print("valid acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f" % (acc, pre, recall, f1, prauc, roc_auc))
model_save_path = os.path.join(model_save_dir, 'model_%d_%s_%.4f.h5' % ((epoch+1), time_str, f1))
model.save(model_save_path)
if best_f1 < f1:
best_f1 = f1
best_epoch = epoch + 1
best_model = model_save_path
acc, pre, recall, f1, prauc, roc_auc = model_eval(model, data_test, config, type="test")
print("test acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f" % (acc, pre, recall, f1, prauc, roc_auc))
file.write("test acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f\n" % (acc, pre, recall, f1, prauc, roc_auc))
file.write("###############################################################\n")
print("###############################################################\n")
file.flush()
os.rename(best_model, best_model.replace(".h5", "_best.h5"))
print("train done. best epoch: %d, best: f1: %f, model path: %s" % (best_epoch, best_f1, best_model))
file.write("train done. best epoch: %d, best: f1: %f, model path: %s\n" % (best_epoch, best_f1, best_model))
CallBack.on_train_end(None)
file.close()
# evaluate
def model_eval(model, dataset, config, type="eval"):
eval_real_output = []
eval_pred_output_prob = []
eval_pred_output = []
data_size = len(dataset)
outputs = [model.get_layer('output').output]
layer_model = Model(inputs=model.input, outputs=outputs)
print("#####################%s#####################" % type)
for patient_index in range(data_size):
llprint("\rBatch: %d/%d" % (patient_index + 1, data_size))
dual = prepare_tax_dual(dataset, index=patient_index)
input_vec1, input_vec2, output_vec, o = dual
layer_model_output = layer_model.predict([input_vec1, input_vec2])
prob = layer_model_output[0][0]
eval_real_output.append(o[0])
eval_pred_output_prob.append(prob)
if prob >= 0.5:
eval_pred_output.append(1)
else:
eval_pred_output.append(0)
print('')
acc, prec, recall, f1 = metrics_non_multi(eval_real_output, eval_pred_output) | roc_auc = roc_auc_non_multi(eval_real_output, eval_pred_output_prob)
prauc = prc_auc_non_multi(eval_real_output, eval_pred_output_prob)
return acc, prec, recall, f1, prauc, roc_auc | random_line_split |
|
nn.rs |
/// back propagation
pub fn backward(&mut self, inputs :&[f64], outputs :Vec<Vec<f64>>, target :&[f64], learning_rate: f64 ) {
debug!("Error: {}", error(target, outputs.last().expect("outputs")));
let l = outputs.len();
let mut new_weights = self.weights.clone();
let mut new_targets = vec!();
for (order,weights) in self.weights.iter().rev().enumerate() {
let rev_order = l-order-1;
let previous_outputs = if rev_order>0 {
&outputs[rev_order-1]
} else {
inputs
};
let previous_size = size(&weights).0;
debug!("previous size: {}",previous_size);
debug!("weights to update: {:?}",size(&weights));
new_targets.push(vec!(0.0; previous_size));
for (i,o) in outputs[rev_order].iter().enumerate() {
let diff = if order==0 {
o - target[i]
} else {
new_targets[order-1][i]
};
let deriv = self.activations[rev_order].derive(&outputs[rev_order],i);
let d = diff * deriv;
debug!("Output: {} -> {}", o, d);
for r in 0..previous_size{
let w = get(&weights,r,i);
let p = if r<previous_outputs.len() {previous_outputs[r]} else {1.0};
let uw = update_weight(w, d * p, learning_rate);
debug!("Weight for row {}: {} -> {} -> {}", r, w, uw, w*d);
set(&mut new_weights[rev_order],r,i,uw);
new_targets[order][r]+=w*d;
}
}
debug!("New targets: {:?}",new_targets);
}
debug!("Before backprop: {:?}",self.weights);
self.weights=new_weights;
}
/// train for one input and one target
pub fn train(&mut self, inputs :&[f64], target :&[f64], learning_rate: f64, max_iter: usize, max_error: f64) -> (usize,f64) {
let mut err;
for it in 0..max_iter {
let outputs = self.forward(inputs);
err = error(target, outputs.last().expect("outputs"));
if err < max_error {
return (it,err);
}
self.backward(inputs,outputs,target,learning_rate);
}
let outputs = self.forward(inputs);
err = error(target, outputs.last().expect("outputs"));
(max_iter,err)
}
/// online training for multiple input/targets
pub fn train_online(&mut self, tests: &[Test], learning_rate: f64, max_iter: usize) -> f64 {
for _ in 0..max_iter {
for test in tests {
let outputs = self.forward(&test.input);
self.backward(&test.input,outputs,&test.target,learning_rate);
}
}
tests.iter().map(|t| {
let outputs = self.forward(&t.input);
error(&t.target, outputs.last().expect("outputs"))
}).sum()
}
}
// generate random initial weights
fn initial_random_weights(topology: &Vec<usize>) -> Vec<Matrix> {
let mut v = Vec::new();
topology.iter().fold(None,
|prev, &sz| {
if let Some(psz) = prev {
v.push(Matrix::rand_range(psz+1,sz,0.0,1.0));
}
Some(sz)
}
);
assert_eq!(topology.len()-1,v.len());
v
}
// build a network from initial weights
fn initial_weights(topology: &Vec<usize>, weights: &[f64]) -> Vec<Matrix> {
let mut v = Vec::new();
let mut st = 0;
topology.iter().fold(None,
|prev, &sz| {
if let Some(psz) = prev {
let end =st + (psz+1) * sz;
assert!(end <= weights.len());
v.push(Matrix::new(psz+1,sz,&weights[st..end]));
st = end;
}
Some(sz)
}
);
assert_eq!(st,weights.len());
assert_eq!(topology.len()-1,v.len());
v
}
/// sigmoid function
pub fn sigmoid(i: f64) -> f64 {
f64::powi(1.0 + f64::exp(-i),-1)
}
/// hyperbolic tangent
pub fn hyptan(i: f64) -> f64 {
let minus2 = f64::exp(-2.0 * i);
(1.0-minus2)/(1.0+minus2)
}
/// RELU function
pub fn relu(i:f64) -> f64 {
f64::max(0.0,i)
}
/// softmax function
pub fn softmax(v: &[f64]) -> Vec<f64> {
let mut v2 = Vec::with_capacity(v.len());
let d = v.iter().max_by(|x,y| x.partial_cmp(y).expect("NaN")).expect("empty vector");
let s = v.iter().fold(0.0,|s,w|{
let e=f64::exp(*w-d);
v2.push(e);
s+e
});
if s == 0.0 {
v2
} else {
v2.iter().map(|w| w/s).collect()
}
}
/// error: sum of errors squared
pub fn error(target: &[f64], output: &[f64]) -> f64 {
target.iter().zip(output.iter()).map(|(t,o)| f64::powi(t-o,2)/2.0).sum()
}
/*fn diff_deriv(target: f64, output: f64) -> f64 {
let diff = output - target;
let deriv = output * (1.0 - output);
diff * deriv
}
fn weight_error(target: f64, output: f64, weighted_input: f64) -> f64 {
diff_deriv(target,output) * weighted_input
}*/
// get the updated value for a weight
fn update_weight(old: f64, error: f64, learning_rate: f64) -> f64 {
old - error * learning_rate
}
/// an activation function
pub trait Activation : std::fmt::Debug {
/// forward activation of all inputs
fn activate(&self, inputs: &[f64]) -> Vec<f64>;
/// derivation for one output given all the outputs and the output index
fn derive(&self, outputs: &[f64], index: usize) -> f64;
}
/// Sigmoid activation function
#[derive(Debug)]
pub struct Sigmoid{}
impl Activation for Sigmoid {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| sigmoid(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
outputs[index] * (1.0 - outputs[index])
}
}
/// Relu activation function
#[derive(Debug)]
pub struct Relu{}
impl Activation for Relu {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| relu(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
if outputs[index] > 0.0 {1.0} else {0.0}
}
}
/// Softmax activation function
#[derive(Debug)]
pub struct Softmax{}
impl Activation for Softmax {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
softmax(inputs)
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
let s: f64 = outputs.iter().sum();
let el = outputs[index];
(s-el)*el / s.powi(2)
}
}
/// Encapsulate one possible input and the target output, for training
pub struct Test {
pub input: Vec<f64>,
pub target: Vec<f64>,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
| {
assert_eq!(self.topology[0],inputs.len());
let mut m = Matrix::new(1,inputs.len(),inputs);
let mut all_results = Vec::with_capacity(self.topology.len() - 1);
self.weights.iter().enumerate().for_each(| (ix,wm) | {
add_column(&mut m,vec!(1.0));
m = mul(&m,wm);
//println!("after mul: {:?}",m);
let acts = self.activations[ix].activate(&get_data(&m));
m = Matrix::new(size(&m).0,size(&m).1,&acts);
//println!("after activation: {:?}",m);
all_results.push(acts);
});
assert_eq!(*self.topology.last().expect("empty topology!"),
all_results.last().expect("empty result!").len());
all_results
} | identifier_body |
|
nn.rs | fn backward(&mut self, inputs :&[f64], outputs :Vec<Vec<f64>>, target :&[f64], learning_rate: f64 ) {
debug!("Error: {}", error(target, outputs.last().expect("outputs")));
let l = outputs.len();
let mut new_weights = self.weights.clone();
let mut new_targets = vec!();
for (order,weights) in self.weights.iter().rev().enumerate() {
let rev_order = l-order-1;
let previous_outputs = if rev_order>0 {
&outputs[rev_order-1]
} else {
inputs
};
let previous_size = size(&weights).0;
debug!("previous size: {}",previous_size);
debug!("weights to update: {:?}",size(&weights));
new_targets.push(vec!(0.0; previous_size));
for (i,o) in outputs[rev_order].iter().enumerate() {
let diff = if order==0 {
o - target[i]
} else {
new_targets[order-1][i]
};
let deriv = self.activations[rev_order].derive(&outputs[rev_order],i);
let d = diff * deriv;
debug!("Output: {} -> {}", o, d);
for r in 0..previous_size{
let w = get(&weights,r,i);
let p = if r<previous_outputs.len() {previous_outputs[r]} else {1.0};
let uw = update_weight(w, d * p, learning_rate);
debug!("Weight for row {}: {} -> {} -> {}", r, w, uw, w*d);
set(&mut new_weights[rev_order],r,i,uw);
new_targets[order][r]+=w*d;
}
}
debug!("New targets: {:?}",new_targets);
}
debug!("Before backprop: {:?}",self.weights);
self.weights=new_weights;
}
/// train for one input and one target
pub fn train(&mut self, inputs :&[f64], target :&[f64], learning_rate: f64, max_iter: usize, max_error: f64) -> (usize,f64) {
let mut err;
for it in 0..max_iter {
let outputs = self.forward(inputs);
err = error(target, outputs.last().expect("outputs"));
if err < max_error {
return (it,err);
}
self.backward(inputs,outputs,target,learning_rate);
}
let outputs = self.forward(inputs);
err = error(target, outputs.last().expect("outputs"));
(max_iter,err)
}
/// online training for multiple input/targets
pub fn train_online(&mut self, tests: &[Test], learning_rate: f64, max_iter: usize) -> f64 {
for _ in 0..max_iter {
for test in tests {
let outputs = self.forward(&test.input);
self.backward(&test.input,outputs,&test.target,learning_rate);
}
}
tests.iter().map(|t| {
let outputs = self.forward(&t.input);
error(&t.target, outputs.last().expect("outputs"))
}).sum()
}
}
// generate random initial weights
fn initial_random_weights(topology: &Vec<usize>) -> Vec<Matrix> {
let mut v = Vec::new();
topology.iter().fold(None,
|prev, &sz| {
if let Some(psz) = prev {
v.push(Matrix::rand_range(psz+1,sz,0.0,1.0));
}
Some(sz)
}
);
assert_eq!(topology.len()-1,v.len());
v
}
// build a network from initial weights
fn initial_weights(topology: &Vec<usize>, weights: &[f64]) -> Vec<Matrix> {
let mut v = Vec::new();
let mut st = 0;
topology.iter().fold(None,
|prev, &sz| {
if let Some(psz) = prev {
let end =st + (psz+1) * sz;
assert!(end <= weights.len());
v.push(Matrix::new(psz+1,sz,&weights[st..end]));
st = end;
}
Some(sz)
}
);
assert_eq!(st,weights.len());
assert_eq!(topology.len()-1,v.len());
v
}
/// sigmoid function
pub fn sigmoid(i: f64) -> f64 {
f64::powi(1.0 + f64::exp(-i),-1)
}
/// hyperbolic tangent
pub fn hyptan(i: f64) -> f64 {
let minus2 = f64::exp(-2.0 * i);
(1.0-minus2)/(1.0+minus2)
}
/// RELU function
pub fn relu(i:f64) -> f64 {
f64::max(0.0,i)
}
/// softmax function
pub fn softmax(v: &[f64]) -> Vec<f64> {
let mut v2 = Vec::with_capacity(v.len());
let d = v.iter().max_by(|x,y| x.partial_cmp(y).expect("NaN")).expect("empty vector");
let s = v.iter().fold(0.0,|s,w|{
let e=f64::exp(*w-d);
v2.push(e);
s+e
});
if s == 0.0 {
v2
} else {
v2.iter().map(|w| w/s).collect()
}
}
/// error: sum of errors squared
pub fn error(target: &[f64], output: &[f64]) -> f64 {
target.iter().zip(output.iter()).map(|(t,o)| f64::powi(t-o,2)/2.0).sum()
}
/*fn diff_deriv(target: f64, output: f64) -> f64 {
let diff = output - target;
let deriv = output * (1.0 - output);
diff * deriv
}
fn weight_error(target: f64, output: f64, weighted_input: f64) -> f64 {
diff_deriv(target,output) * weighted_input
}*/
// get the updated value for a weight
fn update_weight(old: f64, error: f64, learning_rate: f64) -> f64 {
old - error * learning_rate
}
/// an activation function
pub trait Activation : std::fmt::Debug {
/// forward activation of all inputs
fn activate(&self, inputs: &[f64]) -> Vec<f64>;
/// derivation for one output given all the outputs and the output index
fn derive(&self, outputs: &[f64], index: usize) -> f64;
}
/// Sigmoid activation function
#[derive(Debug)]
pub struct Sigmoid{}
impl Activation for Sigmoid {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| sigmoid(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
outputs[index] * (1.0 - outputs[index])
}
}
/// Relu activation function
#[derive(Debug)]
pub struct Relu{}
impl Activation for Relu {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| relu(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
if outputs[index] > 0.0 {1.0} else |
}
}
/// Softmax activation function
#[derive(Debug)]
pub struct Softmax{}
impl Activation for Softmax {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
softmax(inputs)
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
let s: f64 = outputs.iter().sum();
let el = outputs[index];
(s-el)*el / s.powi(2)
}
}
/// Encapsulate one possible input and the target output, for training
pub struct Test {
pub input: Vec<f64>,
pub target: Vec<f64>,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mattmazur() {
// <https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/>
let mut nn = Network::new(vec!(Box::new(Sigmoid{}),Box::new(Sigmoid{})),vec!(2,2,2),&vec!(
0.15, 0.25, 0.2, 0.3, 0.35, 0.35,
0.4, 0.5, 0.45, 0.55, 0.6, 0.6
));
let f1s = nn.forward(&vec!(0.05,0.1));
assert_eq!(f1s[0],vec!(0.593269992107187 | {0.0} | conditional_block |
nn.rs | fn backward(&mut self, inputs :&[f64], outputs :Vec<Vec<f64>>, target :&[f64], learning_rate: f64 ) {
debug!("Error: {}", error(target, outputs.last().expect("outputs")));
let l = outputs.len();
let mut new_weights = self.weights.clone();
let mut new_targets = vec!();
for (order,weights) in self.weights.iter().rev().enumerate() {
let rev_order = l-order-1;
let previous_outputs = if rev_order>0 {
&outputs[rev_order-1] | inputs
};
let previous_size = size(&weights).0;
debug!("previous size: {}",previous_size);
debug!("weights to update: {:?}",size(&weights));
new_targets.push(vec!(0.0; previous_size));
for (i,o) in outputs[rev_order].iter().enumerate() {
let diff = if order==0 {
o - target[i]
} else {
new_targets[order-1][i]
};
let deriv = self.activations[rev_order].derive(&outputs[rev_order],i);
let d = diff * deriv;
debug!("Output: {} -> {}", o, d);
for r in 0..previous_size{
let w = get(&weights,r,i);
let p = if r<previous_outputs.len() {previous_outputs[r]} else {1.0};
let uw = update_weight(w, d * p, learning_rate);
debug!("Weight for row {}: {} -> {} -> {}", r, w, uw, w*d);
set(&mut new_weights[rev_order],r,i,uw);
new_targets[order][r]+=w*d;
}
}
debug!("New targets: {:?}",new_targets);
}
debug!("Before backprop: {:?}",self.weights);
self.weights=new_weights;
}
/// train for one input and one target
pub fn train(&mut self, inputs :&[f64], target :&[f64], learning_rate: f64, max_iter: usize, max_error: f64) -> (usize,f64) {
let mut err;
for it in 0..max_iter {
let outputs = self.forward(inputs);
err = error(target, outputs.last().expect("outputs"));
if err < max_error {
return (it,err);
}
self.backward(inputs,outputs,target,learning_rate);
}
let outputs = self.forward(inputs);
err = error(target, outputs.last().expect("outputs"));
(max_iter,err)
}
/// online training for multiple input/targets
pub fn train_online(&mut self, tests: &[Test], learning_rate: f64, max_iter: usize) -> f64 {
for _ in 0..max_iter {
for test in tests {
let outputs = self.forward(&test.input);
self.backward(&test.input,outputs,&test.target,learning_rate);
}
}
tests.iter().map(|t| {
let outputs = self.forward(&t.input);
error(&t.target, outputs.last().expect("outputs"))
}).sum()
}
}
// generate random initial weights
fn initial_random_weights(topology: &Vec<usize>) -> Vec<Matrix> {
let mut v = Vec::new();
topology.iter().fold(None,
|prev, &sz| {
if let Some(psz) = prev {
v.push(Matrix::rand_range(psz+1,sz,0.0,1.0));
}
Some(sz)
}
);
assert_eq!(topology.len()-1,v.len());
v
}
// build a network from initial weights
fn initial_weights(topology: &Vec<usize>, weights: &[f64]) -> Vec<Matrix> {
let mut v = Vec::new();
let mut st = 0;
topology.iter().fold(None,
|prev, &sz| {
if let Some(psz) = prev {
let end =st + (psz+1) * sz;
assert!(end <= weights.len());
v.push(Matrix::new(psz+1,sz,&weights[st..end]));
st = end;
}
Some(sz)
}
);
assert_eq!(st,weights.len());
assert_eq!(topology.len()-1,v.len());
v
}
/// sigmoid function
pub fn sigmoid(i: f64) -> f64 {
f64::powi(1.0 + f64::exp(-i),-1)
}
/// hyperbolic tangent
pub fn hyptan(i: f64) -> f64 {
let minus2 = f64::exp(-2.0 * i);
(1.0-minus2)/(1.0+minus2)
}
/// RELU function
pub fn relu(i:f64) -> f64 {
f64::max(0.0,i)
}
/// softmax function
pub fn softmax(v: &[f64]) -> Vec<f64> {
let mut v2 = Vec::with_capacity(v.len());
let d = v.iter().max_by(|x,y| x.partial_cmp(y).expect("NaN")).expect("empty vector");
let s = v.iter().fold(0.0,|s,w|{
let e=f64::exp(*w-d);
v2.push(e);
s+e
});
if s == 0.0 {
v2
} else {
v2.iter().map(|w| w/s).collect()
}
}
/// error: sum of errors squared
pub fn error(target: &[f64], output: &[f64]) -> f64 {
target.iter().zip(output.iter()).map(|(t,o)| f64::powi(t-o,2)/2.0).sum()
}
/*fn diff_deriv(target: f64, output: f64) -> f64 {
let diff = output - target;
let deriv = output * (1.0 - output);
diff * deriv
}
fn weight_error(target: f64, output: f64, weighted_input: f64) -> f64 {
diff_deriv(target,output) * weighted_input
}*/
// get the updated value for a weight
fn update_weight(old: f64, error: f64, learning_rate: f64) -> f64 {
old - error * learning_rate
}
/// an activation function
pub trait Activation : std::fmt::Debug {
/// forward activation of all inputs
fn activate(&self, inputs: &[f64]) -> Vec<f64>;
/// derivation for one output given all the outputs and the output index
fn derive(&self, outputs: &[f64], index: usize) -> f64;
}
/// Sigmoid activation function
#[derive(Debug)]
pub struct Sigmoid{}
impl Activation for Sigmoid {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| sigmoid(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
outputs[index] * (1.0 - outputs[index])
}
}
/// Relu activation function
#[derive(Debug)]
pub struct Relu{}
impl Activation for Relu {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| relu(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
if outputs[index] > 0.0 {1.0} else {0.0}
}
}
/// Softmax activation function
#[derive(Debug)]
pub struct Softmax{}
impl Activation for Softmax {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
softmax(inputs)
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
let s: f64 = outputs.iter().sum();
let el = outputs[index];
(s-el)*el / s.powi(2)
}
}
/// Encapsulate one possible input and the target output, for training
pub struct Test {
pub input: Vec<f64>,
pub target: Vec<f64>,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mattmazur() {
// <https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/>
let mut nn = Network::new(vec!(Box::new(Sigmoid{}),Box::new(Sigmoid{})),vec!(2,2,2),&vec!(
0.15, 0.25, 0.2, 0.3, 0.35, 0.35,
0.4, 0.5, 0.45, 0.55, 0.6, 0.6
));
let f1s = nn.forward(&vec!(0.05,0.1));
assert_eq!(f1s[0],vec!(0.5932699921071872, | } else { | random_line_split |
nn.rs | (&self, inputs :&[f64]) -> Vec<Vec<f64>> {
assert_eq!(self.topology[0],inputs.len());
let mut m = Matrix::new(1,inputs.len(),inputs);
let mut all_results = Vec::with_capacity(self.topology.len() - 1);
self.weights.iter().enumerate().for_each(| (ix,wm) | {
add_column(&mut m,vec!(1.0));
m = mul(&m,wm);
//println!("after mul: {:?}",m);
let acts = self.activations[ix].activate(&get_data(&m));
m = Matrix::new(size(&m).0,size(&m).1,&acts);
//println!("after activation: {:?}",m);
all_results.push(acts);
});
assert_eq!(*self.topology.last().expect("empty topology!"),
all_results.last().expect("empty result!").len());
all_results
}
/// back propagation
pub fn backward(&mut self, inputs :&[f64], outputs :Vec<Vec<f64>>, target :&[f64], learning_rate: f64 ) {
debug!("Error: {}", error(target, outputs.last().expect("outputs")));
let l = outputs.len();
let mut new_weights = self.weights.clone();
let mut new_targets = vec!();
for (order,weights) in self.weights.iter().rev().enumerate() {
let rev_order = l-order-1;
let previous_outputs = if rev_order>0 {
&outputs[rev_order-1]
} else {
inputs
};
let previous_size = size(&weights).0;
debug!("previous size: {}",previous_size);
debug!("weights to update: {:?}",size(&weights));
new_targets.push(vec!(0.0; previous_size));
for (i,o) in outputs[rev_order].iter().enumerate() {
let diff = if order==0 {
o - target[i]
} else {
new_targets[order-1][i]
};
let deriv = self.activations[rev_order].derive(&outputs[rev_order],i);
let d = diff * deriv;
debug!("Output: {} -> {}", o, d);
for r in 0..previous_size{
let w = get(&weights,r,i);
let p = if r<previous_outputs.len() {previous_outputs[r]} else {1.0};
let uw = update_weight(w, d * p, learning_rate);
debug!("Weight for row {}: {} -> {} -> {}", r, w, uw, w*d);
set(&mut new_weights[rev_order],r,i,uw);
new_targets[order][r]+=w*d;
}
}
debug!("New targets: {:?}",new_targets);
}
debug!("Before backprop: {:?}",self.weights);
self.weights=new_weights;
}
/// train for one input and one target
pub fn train(&mut self, inputs :&[f64], target :&[f64], learning_rate: f64, max_iter: usize, max_error: f64) -> (usize,f64) {
let mut err;
for it in 0..max_iter {
let outputs = self.forward(inputs);
err = error(target, outputs.last().expect("outputs"));
if err < max_error {
return (it,err);
}
self.backward(inputs,outputs,target,learning_rate);
}
let outputs = self.forward(inputs);
err = error(target, outputs.last().expect("outputs"));
(max_iter,err)
}
/// online training for multiple input/targets
pub fn train_online(&mut self, tests: &[Test], learning_rate: f64, max_iter: usize) -> f64 {
for _ in 0..max_iter {
for test in tests {
let outputs = self.forward(&test.input);
self.backward(&test.input,outputs,&test.target,learning_rate);
}
}
tests.iter().map(|t| {
let outputs = self.forward(&t.input);
error(&t.target, outputs.last().expect("outputs"))
}).sum()
}
}
// generate random initial weights
fn initial_random_weights(topology: &Vec<usize>) -> Vec<Matrix> {
let mut v = Vec::new();
topology.iter().fold(None,
|prev, &sz| {
if let Some(psz) = prev {
v.push(Matrix::rand_range(psz+1,sz,0.0,1.0));
}
Some(sz)
}
);
assert_eq!(topology.len()-1,v.len());
v
}
// build a network from initial weights
fn initial_weights(topology: &Vec<usize>, weights: &[f64]) -> Vec<Matrix> {
let mut v = Vec::new();
let mut st = 0;
topology.iter().fold(None,
|prev, &sz| {
if let Some(psz) = prev {
let end =st + (psz+1) * sz;
assert!(end <= weights.len());
v.push(Matrix::new(psz+1,sz,&weights[st..end]));
st = end;
}
Some(sz)
}
);
assert_eq!(st,weights.len());
assert_eq!(topology.len()-1,v.len());
v
}
/// sigmoid function
pub fn sigmoid(i: f64) -> f64 {
f64::powi(1.0 + f64::exp(-i),-1)
}
/// hyperbolic tangent
pub fn hyptan(i: f64) -> f64 {
let minus2 = f64::exp(-2.0 * i);
(1.0-minus2)/(1.0+minus2)
}
/// RELU function
pub fn relu(i:f64) -> f64 {
f64::max(0.0,i)
}
/// softmax function
pub fn softmax(v: &[f64]) -> Vec<f64> {
let mut v2 = Vec::with_capacity(v.len());
let d = v.iter().max_by(|x,y| x.partial_cmp(y).expect("NaN")).expect("empty vector");
let s = v.iter().fold(0.0,|s,w|{
let e=f64::exp(*w-d);
v2.push(e);
s+e
});
if s == 0.0 {
v2
} else {
v2.iter().map(|w| w/s).collect()
}
}
/// error: sum of errors squared
pub fn error(target: &[f64], output: &[f64]) -> f64 {
target.iter().zip(output.iter()).map(|(t,o)| f64::powi(t-o,2)/2.0).sum()
}
/*fn diff_deriv(target: f64, output: f64) -> f64 {
let diff = output - target;
let deriv = output * (1.0 - output);
diff * deriv
}
fn weight_error(target: f64, output: f64, weighted_input: f64) -> f64 {
diff_deriv(target,output) * weighted_input
}*/
// get the updated value for a weight
fn update_weight(old: f64, error: f64, learning_rate: f64) -> f64 {
old - error * learning_rate
}
/// an activation function
pub trait Activation : std::fmt::Debug {
/// forward activation of all inputs
fn activate(&self, inputs: &[f64]) -> Vec<f64>;
/// derivation for one output given all the outputs and the output index
fn derive(&self, outputs: &[f64], index: usize) -> f64;
}
/// Sigmoid activation function
#[derive(Debug)]
pub struct Sigmoid{}
impl Activation for Sigmoid {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| sigmoid(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
outputs[index] * (1.0 - outputs[index])
}
}
/// Relu activation function
#[derive(Debug)]
pub struct Relu{}
impl Activation for Relu {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
inputs.iter().map(|v| relu(*v)).collect()
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
if outputs[index] > 0.0 {1.0} else {0.0}
}
}
/// Softmax activation function
#[derive(Debug)]
pub struct Softmax{}
impl Activation for Softmax {
fn activate(&self, inputs: &[f64]) -> Vec<f64> {
softmax(inputs)
}
fn derive(&self, outputs: &[f64], index: usize) -> f64 {
let s: f64 = outputs.iter().sum();
let el = outputs[index];
(s-el)*el / s.powi(2)
}
}
/// Encapsulate one possible input and the target output, for training
pub struct Test {
pub input: Vec<f64>,
pub target: Vec<f64 | forward | identifier_name |
|
vm.rs | Self {
Self {
config: Some(Box::new(config)),
functions,
}
}
/// Constructs a built-in program
pub fn new_builtin(functions: FunctionRegistry<BuiltinFunction<C>>) -> Self {
Self {
config: None,
functions,
}
}
/// Constructs a mock loader built-in program
pub fn new_mock() -> Self {
Self {
config: Some(Box::default()),
functions: FunctionRegistry::default(),
}
}
/// Get the configuration settings assuming this is a loader program
pub fn get_config(&self) -> &Config {
self.config.as_ref().unwrap()
}
/// Get the function registry
pub fn get_function_registry(&self) -> &FunctionRegistry<BuiltinFunction<C>> {
&self.functions
}
/// Calculate memory size
pub fn mem_size(&self) -> usize {
mem::size_of::<Self>()
+ if self.config.is_some() {
mem::size_of::<Config>()
} else {
0
}
+ self.functions.mem_size()
}
}
impl<C: ContextObject> Debug for BuiltinProgram<C> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
writeln!(f, "{:?}", unsafe {
// `derive(Debug)` does not know that `C: ContextObject` does not need to implement `Debug`
std::mem::transmute::<
&FunctionRegistry<BuiltinFunction<C>>,
&FunctionRegistry<BuiltinFunction<*const ()>>,
>(&self.functions)
})?;
Ok(())
}
}
/// VM configuration settings
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Config {
/// Maximum call depth
pub max_call_depth: usize,
/// Size of a stack frame in bytes, must match the size specified in the LLVM BPF backend
pub stack_frame_size: usize,
/// Enables the use of MemoryMapping and MemoryRegion for address translation
pub enable_address_translation: bool,
/// Enables gaps in VM address space between the stack frames
pub enable_stack_frame_gaps: bool,
/// Maximal pc distance after which a new instruction meter validation is emitted by the JIT
pub instruction_meter_checkpoint_distance: usize,
/// Enable instruction meter and limiting
pub enable_instruction_meter: bool,
/// Enable instruction tracing
pub enable_instruction_tracing: bool,
/// Enable dynamic string allocation for labels
pub enable_symbol_and_section_labels: bool,
/// Reject ELF files containing issues that the verifier did not catch before (up to v0.2.21)
pub reject_broken_elfs: bool,
/// Ratio of native host instructions per random no-op in JIT (0 = OFF)
pub noop_instruction_rate: u32,
/// Enable disinfection of immediate values and offsets provided by the user in JIT
pub sanitize_user_provided_values: bool,
/// Encrypt the runtime environment in JIT
pub encrypt_runtime_environment: bool,
/// Throw ElfError::SymbolHashCollision when a BPF function collides with a registered syscall
pub external_internal_function_hash_collision: bool,
/// Have the verifier reject "callx r10"
pub reject_callx_r10: bool,
/// Avoid copying read only sections when possible
pub optimize_rodata: bool,
/// Use the new ELF parser
pub new_elf_parser: bool,
/// Use aligned memory mapping
pub aligned_memory_mapping: bool,
/// Allow ExecutableCapability::V1
pub enable_sbpf_v1: bool,
/// Allow ExecutableCapability::V2
pub enable_sbpf_v2: bool,
}
impl Config {
/// Returns the size of the stack memory region
pub fn stack_size(&self) -> usize {
self.stack_frame_size * self.max_call_depth
}
}
impl Default for Config {
fn default() -> Self {
Self {
max_call_depth: 20,
stack_frame_size: 4_096,
enable_address_translation: true,
enable_stack_frame_gaps: true,
instruction_meter_checkpoint_distance: 10000,
enable_instruction_meter: true,
enable_instruction_tracing: false,
enable_symbol_and_section_labels: false,
reject_broken_elfs: false,
noop_instruction_rate: 256,
sanitize_user_provided_values: true,
encrypt_runtime_environment: true,
external_internal_function_hash_collision: true,
reject_callx_r10: true,
optimize_rodata: true,
new_elf_parser: true,
aligned_memory_mapping: true,
enable_sbpf_v1: true,
enable_sbpf_v2: true,
}
}
}
/// Static constructors for Executable
impl<C: ContextObject> Executable<C> {
/// Creates an executable from an ELF file
pub fn from_elf(elf_bytes: &[u8], loader: Arc<BuiltinProgram<C>>) -> Result<Self, EbpfError> {
let executable = Executable::load(elf_bytes, loader)?;
Ok(executable)
}
/// Creates an executable from machine code
pub fn from_text_bytes(
text_bytes: &[u8],
loader: Arc<BuiltinProgram<C>>,
sbpf_version: SBPFVersion,
function_registry: FunctionRegistry<usize>,
) -> Result<Self, EbpfError> {
Executable::new_from_text_bytes(text_bytes, loader, sbpf_version, function_registry)
.map_err(EbpfError::ElfError)
}
}
/// Runtime context
pub trait ContextObject {
/// Called for every instruction executed when tracing is enabled
fn trace(&mut self, state: [u64; 12]);
/// Consume instructions from meter
fn consume(&mut self, amount: u64);
/// Get the number of remaining instructions allowed
fn get_remaining(&self) -> u64;
}
/// Simple instruction meter for testing
#[derive(Debug, Clone, Default)]
pub struct | {
/// Contains the register state at every instruction in order of execution
pub trace_log: Vec<TraceLogEntry>,
/// Maximal amount of instructions which still can be executed
pub remaining: u64,
}
impl ContextObject for TestContextObject {
fn trace(&mut self, state: [u64; 12]) {
self.trace_log.push(state);
}
fn consume(&mut self, amount: u64) {
self.remaining = self.remaining.saturating_sub(amount);
}
fn get_remaining(&self) -> u64 {
self.remaining
}
}
impl TestContextObject {
/// Initialize with instruction meter
pub fn new(remaining: u64) -> Self {
Self {
trace_log: Vec::new(),
remaining,
}
}
/// Compares an interpreter trace and a JIT trace.
///
/// The log of the JIT can be longer because it only validates the instruction meter at branches.
pub fn compare_trace_log(interpreter: &Self, jit: &Self) -> bool {
let interpreter = interpreter.trace_log.as_slice();
let mut jit = jit.trace_log.as_slice();
if jit.len() > interpreter.len() {
jit = &jit[0..interpreter.len()];
}
interpreter == jit
}
}
/// Statistic of taken branches (from a recorded trace)
pub struct DynamicAnalysis {
/// Maximal edge counter value
pub edge_counter_max: usize,
/// src_node, dst_node, edge_counter
pub edges: BTreeMap<usize, BTreeMap<usize, usize>>,
}
impl DynamicAnalysis {
/// Accumulates a trace
pub fn new(trace_log: &[[u64; 12]], analysis: &Analysis) -> Self {
let mut result = Self {
edge_counter_max: 0,
edges: BTreeMap::new(),
};
let mut last_basic_block = usize::MAX;
for traced_instruction in trace_log.iter() {
let pc = traced_instruction[11] as usize;
if analysis.cfg_nodes.contains_key(&pc) {
let counter = result
.edges
.entry(last_basic_block)
.or_default()
.entry(pc)
.or_insert(0);
*counter += 1;
result.edge_counter_max = result.edge_counter_max.max(*counter);
last_basic_block = pc;
}
}
result
}
}
/// A call frame used for function calls inside the Interpreter
#[derive(Clone, Default)]
pub struct CallFrame {
/// The caller saved registers
pub caller_saved_registers: [u64; ebpf::SCRATCH_REGS],
/// The callers frame pointer
pub frame_pointer: u64,
/// The target_pc of the exit instruction which returns back to the caller
pub target_pc: usize,
}
/// A virtual machine to run eBPF programs.
///
/// # Examples
///
/// ```
/// use solana_rbpf::{
/// aligned_memory::AlignedMemory,
/// ebpf,
/// elf::{Executable, FunctionRegistry, SBPFVersion},
/// memory_region::{MemoryMapping, MemoryRegion},
/// verifier::{RequisiteVerifier},
/// vm::{BuiltinProgram, Config, EbpfVm, TestContextObject},
/// };
///
/// let prog = &[
/// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, | TestContextObject | identifier_name |
vm.rs | TestContextObject {
/// Contains the register state at every instruction in order of execution
pub trace_log: Vec<TraceLogEntry>,
/// Maximal amount of instructions which still can be executed
pub remaining: u64,
}
impl ContextObject for TestContextObject {
fn trace(&mut self, state: [u64; 12]) {
self.trace_log.push(state);
}
fn consume(&mut self, amount: u64) {
self.remaining = self.remaining.saturating_sub(amount);
}
fn get_remaining(&self) -> u64 {
self.remaining
}
}
impl TestContextObject {
/// Initialize with instruction meter
pub fn new(remaining: u64) -> Self {
Self {
trace_log: Vec::new(),
remaining,
}
}
/// Compares an interpreter trace and a JIT trace.
///
/// The log of the JIT can be longer because it only validates the instruction meter at branches.
pub fn compare_trace_log(interpreter: &Self, jit: &Self) -> bool {
let interpreter = interpreter.trace_log.as_slice();
let mut jit = jit.trace_log.as_slice();
if jit.len() > interpreter.len() {
jit = &jit[0..interpreter.len()];
}
interpreter == jit
}
}
/// Statistic of taken branches (from a recorded trace)
pub struct DynamicAnalysis {
/// Maximal edge counter value
pub edge_counter_max: usize,
/// src_node, dst_node, edge_counter
pub edges: BTreeMap<usize, BTreeMap<usize, usize>>,
}
impl DynamicAnalysis {
/// Accumulates a trace
pub fn new(trace_log: &[[u64; 12]], analysis: &Analysis) -> Self {
let mut result = Self {
edge_counter_max: 0,
edges: BTreeMap::new(),
};
let mut last_basic_block = usize::MAX;
for traced_instruction in trace_log.iter() {
let pc = traced_instruction[11] as usize;
if analysis.cfg_nodes.contains_key(&pc) {
let counter = result
.edges
.entry(last_basic_block)
.or_default()
.entry(pc)
.or_insert(0);
*counter += 1;
result.edge_counter_max = result.edge_counter_max.max(*counter);
last_basic_block = pc;
}
}
result
}
}
/// A call frame used for function calls inside the Interpreter
#[derive(Clone, Default)]
pub struct CallFrame {
/// The caller saved registers
pub caller_saved_registers: [u64; ebpf::SCRATCH_REGS],
/// The callers frame pointer
pub frame_pointer: u64,
/// The target_pc of the exit instruction which returns back to the caller
pub target_pc: usize,
}
/// A virtual machine to run eBPF programs.
///
/// # Examples
///
/// ```
/// use solana_rbpf::{
/// aligned_memory::AlignedMemory,
/// ebpf,
/// elf::{Executable, FunctionRegistry, SBPFVersion},
/// memory_region::{MemoryMapping, MemoryRegion},
/// verifier::{RequisiteVerifier},
/// vm::{BuiltinProgram, Config, EbpfVm, TestContextObject},
/// };
///
/// let prog = &[
/// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
/// ];
/// let mem = &mut [
/// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
/// ];
///
/// let loader = std::sync::Arc::new(BuiltinProgram::new_mock());
/// let function_registry = FunctionRegistry::default();
/// let mut executable = Executable::<TestContextObject>::from_text_bytes(prog, loader, SBPFVersion::V2, function_registry).unwrap();
/// executable.verify::<RequisiteVerifier>().unwrap();
/// let mut context_object = TestContextObject::new(1);
/// let config = executable.get_config();
/// let sbpf_version = executable.get_sbpf_version();
///
/// let mut stack = AlignedMemory::<{ebpf::HOST_ALIGN}>::zero_filled(config.stack_size());
/// let stack_len = stack.len();
/// let mut heap = AlignedMemory::<{ebpf::HOST_ALIGN}>::with_capacity(0);
///
/// let regions: Vec<MemoryRegion> = vec![
/// executable.get_ro_region(),
/// MemoryRegion::new_writable(
/// stack.as_slice_mut(),
/// ebpf::MM_STACK_START,
/// ),
/// MemoryRegion::new_writable(heap.as_slice_mut(), ebpf::MM_HEAP_START),
/// MemoryRegion::new_writable(mem, ebpf::MM_INPUT_START),
/// ];
///
/// let memory_mapping = MemoryMapping::new(regions, config, sbpf_version).unwrap();
///
/// let mut vm = EbpfVm::new(config, sbpf_version, &mut context_object, memory_mapping, stack_len);
///
/// let (instruction_count, result) = vm.execute_program(&executable, true);
/// assert_eq!(instruction_count, 1);
/// assert_eq!(result.unwrap(), 0);
/// ```
#[repr(C)]
pub struct EbpfVm<'a, C: ContextObject> {
/// Needed to exit from the guest back into the host
pub host_stack_pointer: *mut u64,
/// The current call depth.
///
/// Incremented on calls and decremented on exits. It's used to enforce
/// config.max_call_depth and to know when to terminate execution.
pub call_depth: u64,
/// Guest stack pointer (r11).
///
/// The stack pointer isn't exposed as an actual register. Only sub and add
/// instructions (typically generated by the LLVM backend) are allowed to
/// access it when sbpf_version.dynamic_stack_frames()=true. Its value is only
/// stored here and therefore the register is not tracked in REGISTER_MAP.
pub stack_pointer: u64,
/// Pointer to ContextObject
pub context_object_pointer: &'a mut C,
/// Last return value of instruction_meter.get_remaining()
pub previous_instruction_meter: u64,
/// CPU cycles accumulated by the stop watch
pub stopwatch_numerator: u64,
/// Number of times the stop watch was used
pub stopwatch_denominator: u64,
/// ProgramResult inlined
pub program_result: ProgramResult,
/// MemoryMapping inlined
pub memory_mapping: MemoryMapping<'a>,
/// Stack of CallFrames used by the Interpreter
pub call_frames: Vec<CallFrame>,
/// TCP port for the debugger interface
#[cfg(feature = "debugger")]
pub debug_port: Option<u16>,
}
impl<'a, C: ContextObject> EbpfVm<'a, C> {
/// Creates a new virtual machine instance.
pub fn new(
config: &Config,
sbpf_version: &SBPFVersion,
context_object: &'a mut C,
mut memory_mapping: MemoryMapping<'a>,
stack_len: usize,
) -> Self {
let stack_pointer =
ebpf::MM_STACK_START.saturating_add(if sbpf_version.dynamic_stack_frames() {
// the stack is fully descending, frames start as empty and change size anytime r11 is modified
stack_len
} else {
// within a frame the stack grows down, but frames are ascending
config.stack_frame_size
} as u64);
if !config.enable_address_translation {
memory_mapping = MemoryMapping::new_identity();
}
EbpfVm {
host_stack_pointer: std::ptr::null_mut(),
call_depth: 0,
stack_pointer,
context_object_pointer: context_object,
previous_instruction_meter: 0,
stopwatch_numerator: 0,
stopwatch_denominator: 0,
program_result: ProgramResult::Ok(0),
memory_mapping,
call_frames: vec![CallFrame::default(); config.max_call_depth],
#[cfg(feature = "debugger")]
debug_port: None,
}
}
/// Execute the program
///
/// If interpreted = `false` then the JIT compiled executable is used.
pub fn execute_program(
&mut self,
executable: &Executable<C>,
interpreted: bool,
) -> (u64, ProgramResult) | {
let mut registers = [0u64; 12];
// R1 points to beginning of input memory, R10 to the stack of the first frame, R11 is the pc (hidden)
registers[1] = ebpf::MM_INPUT_START;
registers[ebpf::FRAME_PTR_REG] = self.stack_pointer;
registers[11] = executable.get_entrypoint_instruction_offset() as u64;
let config = executable.get_config();
let initial_insn_count = if config.enable_instruction_meter {
self.context_object_pointer.get_remaining()
} else {
0
};
self.previous_instruction_meter = initial_insn_count;
self.program_result = ProgramResult::Ok(0);
let due_insn_count = if interpreted {
#[cfg(feature = "debugger")]
let debug_port = self.debug_port.clone();
let mut interpreter = Interpreter::new(self, executable, registers);
#[cfg(feature = "debugger")]
if let Some(debug_port) = debug_port { | identifier_body |
|
vm.rs | _elf_parser: bool,
/// Use aligned memory mapping
pub aligned_memory_mapping: bool,
/// Allow ExecutableCapability::V1
pub enable_sbpf_v1: bool,
/// Allow ExecutableCapability::V2
pub enable_sbpf_v2: bool,
}
impl Config {
/// Returns the size of the stack memory region
pub fn stack_size(&self) -> usize {
self.stack_frame_size * self.max_call_depth
}
}
impl Default for Config {
fn default() -> Self {
Self {
max_call_depth: 20,
stack_frame_size: 4_096,
enable_address_translation: true,
enable_stack_frame_gaps: true,
instruction_meter_checkpoint_distance: 10000,
enable_instruction_meter: true,
enable_instruction_tracing: false,
enable_symbol_and_section_labels: false,
reject_broken_elfs: false,
noop_instruction_rate: 256,
sanitize_user_provided_values: true,
encrypt_runtime_environment: true,
external_internal_function_hash_collision: true,
reject_callx_r10: true,
optimize_rodata: true,
new_elf_parser: true,
aligned_memory_mapping: true,
enable_sbpf_v1: true,
enable_sbpf_v2: true,
}
}
}
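// Editor's sketch (not part of the original source): with the default values above the
// reserved stack region is stack_frame_size * max_call_depth = 4_096 * 20 = 81_920 bytes.
#[cfg(test)]
mod config_stack_size_sketch {
    use super::*;

    #[test]
    fn default_stack_size_is_frame_size_times_depth() {
        let config = Config::default();
        assert_eq!(config.stack_size(), 4_096 * 20);
    }
}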
/// Static constructors for Executable
impl<C: ContextObject> Executable<C> {
/// Creates an executable from an ELF file
pub fn from_elf(elf_bytes: &[u8], loader: Arc<BuiltinProgram<C>>) -> Result<Self, EbpfError> {
let executable = Executable::load(elf_bytes, loader)?;
Ok(executable)
}
/// Creates an executable from machine code
pub fn from_text_bytes(
text_bytes: &[u8],
loader: Arc<BuiltinProgram<C>>,
sbpf_version: SBPFVersion,
function_registry: FunctionRegistry<usize>,
) -> Result<Self, EbpfError> {
Executable::new_from_text_bytes(text_bytes, loader, sbpf_version, function_registry)
.map_err(EbpfError::ElfError)
}
}
/// Runtime context
pub trait ContextObject {
/// Called for every instruction executed when tracing is enabled
fn trace(&mut self, state: [u64; 12]);
/// Consume instructions from meter
fn consume(&mut self, amount: u64);
/// Get the number of remaining instructions allowed
fn get_remaining(&self) -> u64;
}
/// Simple instruction meter for testing
#[derive(Debug, Clone, Default)]
pub struct TestContextObject {
/// Contains the register state at every instruction in order of execution
pub trace_log: Vec<TraceLogEntry>,
/// Maximal amount of instructions which still can be executed
pub remaining: u64,
}
impl ContextObject for TestContextObject {
fn trace(&mut self, state: [u64; 12]) {
self.trace_log.push(state);
}
fn consume(&mut self, amount: u64) {
self.remaining = self.remaining.saturating_sub(amount);
}
fn get_remaining(&self) -> u64 {
self.remaining
}
}
impl TestContextObject {
/// Initialize with instruction meter
pub fn new(remaining: u64) -> Self {
Self {
trace_log: Vec::new(),
remaining,
}
}
/// Compares an interpreter trace and a JIT trace.
///
/// The log of the JIT can be longer because it only validates the instruction meter at branches.
pub fn compare_trace_log(interpreter: &Self, jit: &Self) -> bool {
let interpreter = interpreter.trace_log.as_slice();
let mut jit = jit.trace_log.as_slice();
if jit.len() > interpreter.len() {
jit = &jit[0..interpreter.len()];
}
interpreter == jit
}
}
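// Editor's sketch (not part of the original source): compare_trace_log accepts a JIT log
// that runs past the interpreter log, since the JIT only re-checks the instruction meter
// at branches; only the common prefix has to match.
#[cfg(test)]
mod compare_trace_log_sketch {
    use super::*;

    #[test]
    fn jit_trace_may_be_longer_than_interpreter_trace() {
        let mut interpreter_run = TestContextObject::new(0);
        let mut jit_run = TestContextObject::new(0);
        interpreter_run.trace([0u64; 12]);
        jit_run.trace([0u64; 12]);
        jit_run.trace([1u64; 12]); // extra entry beyond the interpreter log is ignored
        assert!(TestContextObject::compare_trace_log(&interpreter_run, &jit_run));
    }
}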
/// Statistic of taken branches (from a recorded trace)
pub struct DynamicAnalysis {
/// Maximal edge counter value
pub edge_counter_max: usize,
/// src_node, dst_node, edge_counter
pub edges: BTreeMap<usize, BTreeMap<usize, usize>>,
}
impl DynamicAnalysis {
/// Accumulates a trace
pub fn new(trace_log: &[[u64; 12]], analysis: &Analysis) -> Self {
let mut result = Self {
edge_counter_max: 0,
edges: BTreeMap::new(),
};
let mut last_basic_block = usize::MAX;
for traced_instruction in trace_log.iter() {
let pc = traced_instruction[11] as usize;
if analysis.cfg_nodes.contains_key(&pc) {
let counter = result
.edges
.entry(last_basic_block)
.or_default()
.entry(pc)
.or_insert(0);
*counter += 1;
result.edge_counter_max = result.edge_counter_max.max(*counter);
last_basic_block = pc;
}
}
result
}
}
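// Editor's note (illustrative, not in the original source): a DynamicAnalysis is built from
// the trace recorded by a ContextObject together with a static Analysis of the executable.
// `Analysis::from_executable` is an assumed constructor from the static analysis module;
// `context_object` and `executable` are hypothetical values from a traced run.
//
//     let analysis = Analysis::from_executable(&executable).unwrap();
//     let dynamic_analysis = DynamicAnalysis::new(&context_object.trace_log, &analysis);
//     // dynamic_analysis.edges maps source basic block -> destination block -> taken count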
/// A call frame used for function calls inside the Interpreter
#[derive(Clone, Default)]
pub struct CallFrame {
/// The caller saved registers
pub caller_saved_registers: [u64; ebpf::SCRATCH_REGS],
/// The caller's frame pointer
pub frame_pointer: u64,
/// The target_pc of the exit instruction which returns back to the caller
pub target_pc: usize,
}
/// A virtual machine to run eBPF programs.
///
/// # Examples
///
/// ```
/// use solana_rbpf::{
/// aligned_memory::AlignedMemory,
/// ebpf,
/// elf::{Executable, FunctionRegistry, SBPFVersion},
/// memory_region::{MemoryMapping, MemoryRegion},
/// verifier::{RequisiteVerifier},
/// vm::{BuiltinProgram, Config, EbpfVm, TestContextObject},
/// };
///
/// let prog = &[
/// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
/// ];
/// let mem = &mut [
/// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
/// ];
///
/// let loader = std::sync::Arc::new(BuiltinProgram::new_mock());
/// let function_registry = FunctionRegistry::default();
/// let mut executable = Executable::<TestContextObject>::from_text_bytes(prog, loader, SBPFVersion::V2, function_registry).unwrap();
/// executable.verify::<RequisiteVerifier>().unwrap();
/// let mut context_object = TestContextObject::new(1);
/// let config = executable.get_config();
/// let sbpf_version = executable.get_sbpf_version();
///
/// let mut stack = AlignedMemory::<{ebpf::HOST_ALIGN}>::zero_filled(config.stack_size());
/// let stack_len = stack.len();
/// let mut heap = AlignedMemory::<{ebpf::HOST_ALIGN}>::with_capacity(0);
///
/// let regions: Vec<MemoryRegion> = vec![
/// executable.get_ro_region(),
/// MemoryRegion::new_writable(
/// stack.as_slice_mut(),
/// ebpf::MM_STACK_START,
/// ),
/// MemoryRegion::new_writable(heap.as_slice_mut(), ebpf::MM_HEAP_START),
/// MemoryRegion::new_writable(mem, ebpf::MM_INPUT_START),
/// ];
///
/// let memory_mapping = MemoryMapping::new(regions, config, sbpf_version).unwrap();
///
/// let mut vm = EbpfVm::new(config, sbpf_version, &mut context_object, memory_mapping, stack_len);
///
/// let (instruction_count, result) = vm.execute_program(&executable, true);
/// assert_eq!(instruction_count, 1);
/// assert_eq!(result.unwrap(), 0);
/// ```
#[repr(C)]
pub struct EbpfVm<'a, C: ContextObject> {
/// Needed to exit from the guest back into the host
pub host_stack_pointer: *mut u64,
/// The current call depth.
///
/// Incremented on calls and decremented on exits. It's used to enforce
/// config.max_call_depth and to know when to terminate execution.
pub call_depth: u64,
/// Guest stack pointer (r11).
///
/// The stack pointer isn't exposed as an actual register. Only sub and add
/// instructions (typically generated by the LLVM backend) are allowed to
/// access it when sbpf_version.dynamic_stack_frames()=true. Its value is only
/// stored here and therefore the register is not tracked in REGISTER_MAP.
pub stack_pointer: u64,
/// Pointer to ContextObject
pub context_object_pointer: &'a mut C,
/// Last return value of instruction_meter.get_remaining()
pub previous_instruction_meter: u64,
/// CPU cycles accumulated by the stop watch
pub stopwatch_numerator: u64,
/// Number of times the stop watch was used
pub stopwatch_denominator: u64,
/// ProgramResult inlined
pub program_result: ProgramResult,
/// MemoryMapping inlined
pub memory_mapping: MemoryMapping<'a>,
/// Stack of CallFrames used by the Interpreter
pub call_frames: Vec<CallFrame>,
/// TCP port for the debugger interface
#[cfg(feature = "debugger")] | pub debug_port: Option<u16>,
} | random_line_split |
|
vm.rs | .as_slice();
if jit.len() > interpreter.len() {
jit = &jit[0..interpreter.len()];
}
interpreter == jit
}
}
/// Statistic of taken branches (from a recorded trace)
pub struct DynamicAnalysis {
/// Maximal edge counter value
pub edge_counter_max: usize,
/// src_node, dst_node, edge_counter
pub edges: BTreeMap<usize, BTreeMap<usize, usize>>,
}
impl DynamicAnalysis {
/// Accumulates a trace
pub fn new(trace_log: &[[u64; 12]], analysis: &Analysis) -> Self {
let mut result = Self {
edge_counter_max: 0,
edges: BTreeMap::new(),
};
let mut last_basic_block = usize::MAX;
for traced_instruction in trace_log.iter() {
let pc = traced_instruction[11] as usize;
if analysis.cfg_nodes.contains_key(&pc) {
let counter = result
.edges
.entry(last_basic_block)
.or_default()
.entry(pc)
.or_insert(0);
*counter += 1;
result.edge_counter_max = result.edge_counter_max.max(*counter);
last_basic_block = pc;
}
}
result
}
}
/// A call frame used for function calls inside the Interpreter
#[derive(Clone, Default)]
pub struct CallFrame {
/// The caller saved registers
pub caller_saved_registers: [u64; ebpf::SCRATCH_REGS],
/// The caller's frame pointer
pub frame_pointer: u64,
/// The target_pc of the exit instruction which returns back to the caller
pub target_pc: usize,
}
/// A virtual machine to run eBPF programs.
///
/// # Examples
///
/// ```
/// use solana_rbpf::{
/// aligned_memory::AlignedMemory,
/// ebpf,
/// elf::{Executable, FunctionRegistry, SBPFVersion},
/// memory_region::{MemoryMapping, MemoryRegion},
/// verifier::{RequisiteVerifier},
/// vm::{BuiltinProgram, Config, EbpfVm, TestContextObject},
/// };
///
/// let prog = &[
/// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
/// ];
/// let mem = &mut [
/// 0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
/// ];
///
/// let loader = std::sync::Arc::new(BuiltinProgram::new_mock());
/// let function_registry = FunctionRegistry::default();
/// let mut executable = Executable::<TestContextObject>::from_text_bytes(prog, loader, SBPFVersion::V2, function_registry).unwrap();
/// executable.verify::<RequisiteVerifier>().unwrap();
/// let mut context_object = TestContextObject::new(1);
/// let config = executable.get_config();
/// let sbpf_version = executable.get_sbpf_version();
///
/// let mut stack = AlignedMemory::<{ebpf::HOST_ALIGN}>::zero_filled(config.stack_size());
/// let stack_len = stack.len();
/// let mut heap = AlignedMemory::<{ebpf::HOST_ALIGN}>::with_capacity(0);
///
/// let regions: Vec<MemoryRegion> = vec![
/// executable.get_ro_region(),
/// MemoryRegion::new_writable(
/// stack.as_slice_mut(),
/// ebpf::MM_STACK_START,
/// ),
/// MemoryRegion::new_writable(heap.as_slice_mut(), ebpf::MM_HEAP_START),
/// MemoryRegion::new_writable(mem, ebpf::MM_INPUT_START),
/// ];
///
/// let memory_mapping = MemoryMapping::new(regions, config, sbpf_version).unwrap();
///
/// let mut vm = EbpfVm::new(config, sbpf_version, &mut context_object, memory_mapping, stack_len);
///
/// let (instruction_count, result) = vm.execute_program(&executable, true);
/// assert_eq!(instruction_count, 1);
/// assert_eq!(result.unwrap(), 0);
/// ```
#[repr(C)]
pub struct EbpfVm<'a, C: ContextObject> {
/// Needed to exit from the guest back into the host
pub host_stack_pointer: *mut u64,
/// The current call depth.
///
/// Incremented on calls and decremented on exits. It's used to enforce
/// config.max_call_depth and to know when to terminate execution.
pub call_depth: u64,
/// Guest stack pointer (r11).
///
/// The stack pointer isn't exposed as an actual register. Only sub and add
/// instructions (typically generated by the LLVM backend) are allowed to
/// access it when sbpf_version.dynamic_stack_frames()=true. Its value is only
/// stored here and therefore the register is not tracked in REGISTER_MAP.
pub stack_pointer: u64,
/// Pointer to ContextObject
pub context_object_pointer: &'a mut C,
/// Last return value of instruction_meter.get_remaining()
pub previous_instruction_meter: u64,
/// CPU cycles accumulated by the stop watch
pub stopwatch_numerator: u64,
/// Number of times the stop watch was used
pub stopwatch_denominator: u64,
/// ProgramResult inlined
pub program_result: ProgramResult,
/// MemoryMapping inlined
pub memory_mapping: MemoryMapping<'a>,
/// Stack of CallFrames used by the Interpreter
pub call_frames: Vec<CallFrame>,
/// TCP port for the debugger interface
#[cfg(feature = "debugger")]
pub debug_port: Option<u16>,
}
impl<'a, C: ContextObject> EbpfVm<'a, C> {
/// Creates a new virtual machine instance.
pub fn new(
config: &Config,
sbpf_version: &SBPFVersion,
context_object: &'a mut C,
mut memory_mapping: MemoryMapping<'a>,
stack_len: usize,
) -> Self {
let stack_pointer =
ebpf::MM_STACK_START.saturating_add(if sbpf_version.dynamic_stack_frames() {
// the stack is fully descending, frames start as empty and change size anytime r11 is modified
stack_len
} else {
// within a frame the stack grows down, but frames are ascending
config.stack_frame_size
} as u64);
if !config.enable_address_translation {
memory_mapping = MemoryMapping::new_identity();
}
EbpfVm {
host_stack_pointer: std::ptr::null_mut(),
call_depth: 0,
stack_pointer,
context_object_pointer: context_object,
previous_instruction_meter: 0,
stopwatch_numerator: 0,
stopwatch_denominator: 0,
program_result: ProgramResult::Ok(0),
memory_mapping,
call_frames: vec![CallFrame::default(); config.max_call_depth],
#[cfg(feature = "debugger")]
debug_port: None,
}
}
/// Execute the program
///
/// If interpreted = `false` then the JIT compiled executable is used.
pub fn execute_program(
&mut self,
executable: &Executable<C>,
interpreted: bool,
) -> (u64, ProgramResult) {
let mut registers = [0u64; 12];
// R1 points to beginning of input memory, R10 to the stack of the first frame, R11 is the pc (hidden)
registers[1] = ebpf::MM_INPUT_START;
registers[ebpf::FRAME_PTR_REG] = self.stack_pointer;
registers[11] = executable.get_entrypoint_instruction_offset() as u64;
let config = executable.get_config();
let initial_insn_count = if config.enable_instruction_meter {
self.context_object_pointer.get_remaining()
} else {
0
};
self.previous_instruction_meter = initial_insn_count;
self.program_result = ProgramResult::Ok(0);
let due_insn_count = if interpreted {
#[cfg(feature = "debugger")]
let debug_port = self.debug_port.clone();
let mut interpreter = Interpreter::new(self, executable, registers);
#[cfg(feature = "debugger")]
if let Some(debug_port) = debug_port {
crate::debugger::execute(&mut interpreter, debug_port);
} else {
while interpreter.step() {}
}
#[cfg(not(feature = "debugger"))]
while interpreter.step() {}
interpreter.due_insn_count
} else | {
#[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
{
let compiled_program = match executable
.get_compiled_program()
.ok_or_else(|| Box::new(EbpfError::JitNotCompiled))
{
Ok(compiled_program) => compiled_program,
Err(error) => return (0, ProgramResult::Err(error)),
};
let instruction_meter_final =
compiled_program.invoke(config, self, registers).max(0) as u64;
self.context_object_pointer
.get_remaining()
.saturating_sub(instruction_meter_final)
}
#[cfg(not(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64")))]
{
return (0, ProgramResult::Err(Box::new(EbpfError::JitNotCompiled)));
} | conditional_block |
|
ed25519.rs | 32];
h.input(secret_key.as_bytes());
hash.copy_from_slice(h.fixed_result().as_slice());
digest = array_mut_ref!(&mut hash, 0, 32);
digest[0] &= 248;
digest[31] &= 127;
digest[31] |= 64;
pk = (&Scalar(*digest) * &constants::ED25519_BASEPOINT_TABLE).compress().to_bytes();
PublicKey(CompressedEdwardsY(pk))
}
/// Verify a signature on a message with this keypair's public key.
///
/// # Return
///
/// Returns true if the signature was successfully verified, and
/// false otherwise.
pub fn verify<D>(&self, message: &[u8], signature: &Signature) -> bool
where D: Digest<OutputSize = U64> + Default {
use curve25519_dalek::edwards::vartime;
let mut h: D = D::default();
let mut a: ExtendedPoint;
let ao: Option<ExtendedPoint>;
let r: ExtendedPoint;
let digest: [u8; 64];
let digest_reduced: Scalar;
if signature.0[63] & 224 != 0 {
return false;
}
ao = self.decompress();
if ao.is_some() {
a = ao.unwrap();
} else {
return false;
}
a = -(&a);
let top_half: &[u8; 32] = array_ref!(&signature.0, 32, 32);
let bottom_half: &[u8; 32] = array_ref!(&signature.0, 0, 32);
h.input(&bottom_half[..]);
h.input(&self.to_bytes());
h.input(&message);
let digest_bytes = h.fixed_result();
digest = *array_ref!(digest_bytes, 0, 64);
digest_reduced = Scalar::reduce(&digest);
r = vartime::double_scalar_mult_basepoint(&digest_reduced, &a, &Scalar(*top_half));
slices_equal(bottom_half, &r.compress().to_bytes()) == 1
}
}
/// An ed25519 keypair.
#[derive(Debug)]
#[repr(C)]
pub struct Keypair {
/// The public half of this keypair.
pub public: PublicKey,
/// The secret half of this keypair.
pub secret: SecretKey,
}
impl Keypair {
/// Construct a `Keypair` from the bytes of a `PublicKey` and `SecretKey`.
///
/// # Inputs
///
/// * `public`: a `[u8; 32]` representing the compressed Edwards-Y
/// coordinate of a point on curve25519.
/// * `secret`: a `[u8; 32]` representing the corresponding secret key.
///
/// # Warning
///
/// Absolutely no validation is done on the key. If you give this function
/// bytes which do not represent a valid point, or which do not represent
/// corresponding parts of the key, then your `Keypair` will be broken and
/// it will be your fault.
///
/// # Returns
///
/// A `Keypair`.
pub fn from_bytes<'a>(public: &'a [u8; 32], secret: &'a [u8; 32]) -> Keypair {
Keypair{ public: PublicKey::from_bytes(public),
secret: SecretKey::from_bytes(secret), }
}
/// Generate an ed25519 keypair.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate sha2;
/// extern crate ed25519_dalek;
///
/// # fn main() {
///
/// use rand::Rng;
/// use rand::OsRng;
/// use sha2::Sha512;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
///
/// let mut cspring: OsRng = OsRng::new().unwrap();
/// let keypair: Keypair = Keypair::generate::<Sha512>(&mut cspring);
///
/// # }
/// ```
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. the one returned
/// from `rand::OsRng::new()` (in the `rand` crate).
///
/// The caller must also supply a hash function which implements the
/// `Digest` and `Default` traits, and which returns 512 bits of output.
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
#[cfg(feature = "std")]
pub fn generate<D>(csprng: &mut Rng) -> Keypair
where D: Digest<OutputSize = U64> + Default {
let sk: SecretKey = SecretKey::generate(csprng);
let pk: PublicKey = PublicKey::from_secret::<D>(&sk);
Keypair{ public: pk, secret: sk }
}
/// Sign a message with this keypair's secret key.
pub fn sign<D>(&self, message: &[u8]) -> Signature
where D: Digest<OutputSize = U64> + Default {
let mut h: D = D::default();
let mut hash: [u8; 64] = [0u8; 64];
let mut signature_bytes: [u8; 64] = [0u8; SIGNATURE_LENGTH];
let mut expanded_key_secret: Scalar;
let mesg_digest: Scalar;
let hram_digest: Scalar;
let r: ExtendedPoint;
let s: Scalar;
let t: CompressedEdwardsY;
let secret_key: &[u8; 32] = self.secret.as_bytes();
let public_key: &[u8; 32] = self.public.as_bytes();
h.input(secret_key);
hash.copy_from_slice(h.fixed_result().as_slice());
expanded_key_secret = Scalar(*array_ref!(&hash, 0, 32));
expanded_key_secret[0] &= 248;
expanded_key_secret[31] &= 63;
expanded_key_secret[31] |= 64;
h = D::default();
h.input(&hash[32..]);
h.input(&message);
hash.copy_from_slice(h.fixed_result().as_slice());
mesg_digest = Scalar::reduce(&hash);
r = &mesg_digest * &constants::ED25519_BASEPOINT_TABLE;
h = D::default();
h.input(&r.compress().to_bytes()[..]);
h.input(public_key);
h.input(&message);
hash.copy_from_slice(h.fixed_result().as_slice());
hram_digest = Scalar::reduce(&hash);
s = Scalar::multiply_add(&hram_digest, &expanded_key_secret, &mesg_digest);
t = r.compress();
signature_bytes[..32].copy_from_slice(&t.0);
signature_bytes[32..64].copy_from_slice(&s.0);
Signature(*array_ref!(&signature_bytes, 0, 64))
}
/// Verify a signature on a message with this keypair's public key.
pub fn verify<D>(&self, message: &[u8], signature: &Signature) -> bool
where D: FixedOutput<OutputSize = U64> + BlockInput + Default + Input {
self.public.verify::<D>(message, signature)
}
}
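// Editor's sketch (not part of the original source): an end-to-end sign/verify round trip
// with the API above, mirroring the doc examples. The message bytes are arbitrary.
//
//     let mut csprng: OsRng = OsRng::new().unwrap();
//     let keypair: Keypair = Keypair::generate::<Sha512>(&mut csprng);
//     let message: &[u8] = b"hello ed25519";
//     let signature: Signature = keypair.sign::<Sha512>(message);
//     assert!(keypair.verify::<Sha512>(message, &signature));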
#[cfg(test)]
mod test {
use std::io::BufReader;
use std::io::BufRead;
use std::fs::File;
use std::string::String;
use std::vec::Vec;
use curve25519_dalek::edwards::ExtendedPoint;
use rand::OsRng;
use hex::FromHex;
use sha2::Sha512;
use super::*;
#[test]
fn unmarshal_marshal() { // TestUnmarshalMarshal
let mut cspring: OsRng;
let mut keypair: Keypair;
let mut x: Option<ExtendedPoint>;
let a: ExtendedPoint;
let public: PublicKey;
cspring = OsRng::new().unwrap();
// from_bytes() fails if vx²-u=0 and vx²+u=0
loop {
keypair = Keypair::generate::<Sha512>(&mut cspring);
x = keypair.public.decompress();
if x.is_some() {
a = x.unwrap();
break;
}
}
public = PublicKey(a.compress());
assert!(keypair.public.0 == public.0);
}
#[test]
fn sign_ver | ify() { // | identifier_name |
|
ed25519.rs | _bytes(bytes: &[u8]) -> SecretKey {
SecretKey(*array_ref!(bytes, 0, SECRET_KEY_LENGTH))
}
/// Generate a `SecretKey` from a `csprng`.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate sha2;
/// extern crate ed25519_dalek;
///
/// # fn main() {
///
/// use rand::Rng;
/// use rand::OsRng;
/// use sha2::Sha512;
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::SecretKey;
/// use ed25519_dalek::Signature;
///
/// let mut csprng: OsRng = OsRng::new().unwrap();
/// let secret_key: SecretKey = SecretKey::generate(&mut csprng);
///
/// # }
/// ```
///
/// Afterwards, you can generate the corresponding public key—provided you also
/// supply a hash function which implements the `Digest` and `Default`
/// traits, and which returns 512 bits of output—via:
///
/// ```
/// # extern crate rand;
/// # extern crate sha2;
/// # extern crate ed25519_dalek;
/// #
/// # fn main() {
/// #
/// # use rand::Rng;
/// # use rand::OsRng;
/// # use sha2::Sha512;
/// # use ed25519_dalek::PublicKey;
/// # use ed25519_dalek::SecretKey;
/// # use ed25519_dalek::Signature;
/// #
/// # let mut csprng: OsRng = OsRng::new().unwrap();
/// # let secret_key: SecretKey = SecretKey::generate(&mut csprng);
///
/// let public_key: PublicKey = PublicKey::from_secret::<Sha512>(&secret_key);
/// # }
/// ```
///
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. the one returned
/// from `rand::OsRng::new()` (in the `rand` crate).
///
#[cfg(feature = "std")]
pub fn generate(csprng: &mut Rng) -> SecretKey {
let mut sk: SecretKey = SecretKey([0u8; 32]);
csprng.fill_bytes(&mut sk.0);
sk
}
}
/// An ed25519 public key.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct PublicKey(pub CompressedEdwardsY);
impl Debug for PublicKey {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "PublicKey( CompressedPoint( {:?} ))", self.0)
}
}
impl PublicKey {
/// Convert this public key to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; PUBLIC_KEY_LENGTH] {
self.0.to_bytes()
}
/// View this public key as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; PUBLIC_KEY_LENGTH] {
&(self.0).0
}
/// Construct a `PublicKey` from a slice of bytes.
///
/// # Warning
///
/// The caller is responsible for ensuring that the bytes passed into this
/// method actually represent a `curve25519_dalek::curve::CompressedEdwardsY`
/// and that said compressed point is actually a point on the curve.
///
/// # Example
///
/// ```
/// # extern crate ed25519_dalek;
/// # fn main() {
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::PUBLIC_KEY_LENGTH;
///
/// let public_key_bytes: [u8; PUBLIC_KEY_LENGTH] = [
/// 215, 90, 152, 1, 130, 177, 10, 183, 213, 75, 254, 211, 201, 100, 7, 58,
/// 14, 225, 114, 243, 218, 166, 35, 37, 175, 2, 26, 104, 247, 7, 81, 26];
///
/// let public_key: PublicKey = PublicKey::from_bytes(&public_key_bytes);
/// # }
/// ```
///
/// # Returns
///
/// A `PublicKey`.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> PublicKey {
PublicKey(CompressedEdwardsY(*array_ref!(bytes, 0, 32)))
}
/// Convert this public key to its underlying extended twisted Edwards coordinate.
#[inline]
fn decompress(&self) -> Option<ExtendedPoint> {
self.0.decompress()
}
/// Derive this public key from its corresponding `SecretKey`.
#[cfg(feature = "std")]
#[allow(unused_assignments)]
pub fn from_secret<D>(secret_key: &SecretKey) -> PublicKey
where D: Digest<OutputSize = U64> + Default {
let mut h: D = D::default();
let mut hash: [u8; 64] = [0u8; 64];
let pk: [u8; 32];
let mut digest: &mut [u8; 32];
h.input(secret_key.as_bytes());
hash.copy_from_slice(h.fixed_result().as_slice());
digest = array_mut_ref!(&mut hash, 0, 32);
digest[0] &= 248;
digest[31] &= 127;
digest[31] |= 64;
pk = (&Scalar(*digest) * &constants::ED25519_BASEPOINT_TABLE).compress().to_bytes();
PublicKey(CompressedEdwardsY(pk))
}
/// Verify a signature on a message with this keypair's public key.
///
/// # Return
///
/// Returns true if the signature was successfully verified, and
/// false otherwise.
pub fn verify<D>(&self, message: &[u8], signature: &Signature) -> bool
where D: Digest<OutputSize = U64> + Default {
use curve25519_dalek::edwards::vartime;
let mut h: D = D::default();
let mut a: ExtendedPoint;
let ao: Option<ExtendedPoint>;
let r: ExtendedPoint;
let digest: [u8; 64];
let digest_reduced: Scalar;
if signature.0[63] & 224 != 0 { | if ao.is_some() {
a = ao.unwrap();
} else {
return false;
}
a = -(&a);
let top_half: &[u8; 32] = array_ref!(&signature.0, 32, 32);
let bottom_half: &[u8; 32] = array_ref!(&signature.0, 0, 32);
h.input(&bottom_half[..]);
h.input(&self.to_bytes());
h.input(&message);
let digest_bytes = h.fixed_result();
digest = *array_ref!(digest_bytes, 0, 64);
digest_reduced = Scalar::reduce(&digest);
r = vartime::double_scalar_mult_basepoint(&digest_reduced, &a, &Scalar(*top_half));
slices_equal(bottom_half, &r.compress().to_bytes()) == 1
}
}
/// An ed25519 keypair.
#[derive(Debug)]
#[repr(C)]
pub struct Keypair {
/// The public half of this keypair.
pub public: PublicKey,
/// The secret half of this keypair.
pub secret: SecretKey,
}
impl Keypair {
/// Construct a `Keypair` from the bytes of a `PublicKey` and `SecretKey`.
///
/// # Inputs
///
/// * `public`: a `[u8; 32]` representing the compressed Edwards-Y
/// coordinate of a point on curve25519.
/// * `secret`: a `[u8; 32]` representing | return false;
}
ao = self.decompress();
| random_line_split |
ed25519.rs | _bytes(bytes: &[u8]) -> SecretKey {
SecretKey(*array_ref!(bytes, 0, SECRET_KEY_LENGTH))
}
/// Generate a `SecretKey` from a `csprng`.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate sha2;
/// extern crate ed25519_dalek;
///
/// # fn main() {
///
/// use rand::Rng;
/// use rand::OsRng;
/// use sha2::Sha512;
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::SecretKey;
/// use ed25519_dalek::Signature;
///
/// let mut csprng: OsRng = OsRng::new().unwrap();
/// let secret_key: SecretKey = SecretKey::generate(&mut csprng);
///
/// # }
/// ```
///
/// Afterwards, you can generate the corresponding public key—provided you also
/// supply a hash function which implements the `Digest` and `Default`
/// traits, and which returns 512 bits of output—via:
///
/// ```
/// # extern crate rand;
/// # extern crate sha2;
/// # extern crate ed25519_dalek;
/// #
/// # fn main() {
/// #
/// # use rand::Rng;
/// # use rand::OsRng;
/// # use sha2::Sha512;
/// # use ed25519_dalek::PublicKey;
/// # use ed25519_dalek::SecretKey;
/// # use ed25519_dalek::Signature;
/// #
/// # let mut csprng: OsRng = OsRng::new().unwrap();
/// # let secret_key: SecretKey = SecretKey::generate(&mut csprng);
///
/// let public_key: PublicKey = PublicKey::from_secret::<Sha512>(&secret_key);
/// # }
/// ```
///
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. the one returned
/// from `rand::OsRng::new()` (in the `rand` crate).
///
#[cfg(feature = "std")]
pub fn generate(csprng: &mut Rng) -> SecretKey {
let mut sk: SecretKey = SecretKey([0u8; 32]);
csprng.fill_bytes(&mut sk.0);
sk
}
}
/// An ed25519 public key.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct PublicKey(pub CompressedEdwardsY);
impl Debug for PublicKey {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "PublicKey( CompressedPoint( {:?} ))", self.0)
}
}
impl PublicKey {
/// Convert this public key to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; PUBLIC_KEY_LENGTH] {
| /// View this public key as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; PUBLIC_KEY_LENGTH] {
&(self.0).0
}
/// Construct a `PublicKey` from a slice of bytes.
///
/// # Warning
///
/// The caller is responsible for ensuring that the bytes passed into this
/// method actually represent a `curve25519_dalek::curve::CompressedEdwardsY`
/// and that said compressed point is actually a point on the curve.
///
/// # Example
///
/// ```
/// # extern crate ed25519_dalek;
/// # fn main() {
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::PUBLIC_KEY_LENGTH;
///
/// let public_key_bytes: [u8; PUBLIC_KEY_LENGTH] = [
/// 215, 90, 152, 1, 130, 177, 10, 183, 213, 75, 254, 211, 201, 100, 7, 58,
/// 14, 225, 114, 243, 218, 166, 35, 37, 175, 2, 26, 104, 247, 7, 81, 26];
///
/// let public_key: PublicKey = PublicKey::from_bytes(&public_key_bytes);
/// # }
/// ```
///
/// # Returns
///
/// A `PublicKey`.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> PublicKey {
PublicKey(CompressedEdwardsY(*array_ref!(bytes, 0, 32)))
}
/// Convert this public key to its underlying extended twisted Edwards coordinate.
#[inline]
fn decompress(&self) -> Option<ExtendedPoint> {
self.0.decompress()
}
/// Derive this public key from its corresponding `SecretKey`.
#[cfg(feature = "std")]
#[allow(unused_assignments)]
pub fn from_secret<D>(secret_key: &SecretKey) -> PublicKey
where D: Digest<OutputSize = U64> + Default {
let mut h: D = D::default();
let mut hash: [u8; 64] = [0u8; 64];
let pk: [u8; 32];
let mut digest: &mut [u8; 32];
h.input(secret_key.as_bytes());
hash.copy_from_slice(h.fixed_result().as_slice());
digest = array_mut_ref!(&mut hash, 0, 32);
digest[0] &= 248;
digest[31] &= 127;
digest[31] |= 64;
pk = (&Scalar(*digest) * &constants::ED25519_BASEPOINT_TABLE).compress().to_bytes();
PublicKey(CompressedEdwardsY(pk))
}
/// Verify a signature on a message with this keypair's public key.
///
/// # Return
///
/// Returns true if the signature was successfully verified, and
/// false otherwise.
pub fn verify<D>(&self, message: &[u8], signature: &Signature) -> bool
where D: Digest<OutputSize = U64> + Default {
use curve25519_dalek::edwards::vartime;
let mut h: D = D::default();
let mut a: ExtendedPoint;
let ao: Option<ExtendedPoint>;
let r: ExtendedPoint;
let digest: [u8; 64];
let digest_reduced: Scalar;
if signature.0[63] & 224 != 0 {
return false;
}
ao = self.decompress();
if ao.is_some() {
a = ao.unwrap();
} else {
return false;
}
a = -(&a);
let top_half: &[u8; 32] = array_ref!(&signature.0, 32, 32);
let bottom_half: &[u8; 32] = array_ref!(&signature.0, 0, 32);
h.input(&bottom_half[..]);
h.input(&self.to_bytes());
h.input(&message);
let digest_bytes = h.fixed_result();
digest = *array_ref!(digest_bytes, 0, 64);
digest_reduced = Scalar::reduce(&digest);
r = vartime::double_scalar_mult_basepoint(&digest_reduced, &a, &Scalar(*top_half));
slices_equal(bottom_half, &r.compress().to_bytes()) == 1
}
}
/// An ed25519 keypair.
#[derive(Debug)]
#[repr(C)]
pub struct Keypair {
/// The public half of this keypair.
pub public: PublicKey,
/// The secret half of this keypair.
pub secret: SecretKey,
}
impl Keypair {
/// Construct a `Keypair` from the bytes of a `PublicKey` and `SecretKey`.
///
/// # Inputs
///
/// * `public`: a `[u8; 32]` representing the compressed Edwards-Y
/// coordinate of a point on curve25519.
/// * `secret`: a `[u8; 32]` | self.0.to_bytes()
}
| identifier_body |
ed25519.rs | lse {
return false;
}
}
}
impl Signature {
/// View this `Signature` as a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; SIGNATURE_LENGTH] {
self.0
}
/// View this `Signature` as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; SIGNATURE_LENGTH] {
&self.0
}
/// Construct a `Signature` from a slice of bytes.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> Signature {
Signature(*array_ref!(bytes, 0, SIGNATURE_LENGTH))
}
}
/// An EdDSA secret key.
#[repr(C)]
pub struct SecretKey(pub [u8; SECRET_KEY_LENGTH]);
impl Debug for SecretKey {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "SecretKey: {:?}", &self.0[..])
}
}
impl SecretKey {
/// Convert this secret key to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; SECRET_KEY_LENGTH] {
self.0
}
/// View this secret key as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; SECRET_KEY_LENGTH] {
&self.0
}
/// Construct a `SecretKey` from a slice of bytes.
///
/// # Example
///
/// ```
/// # extern crate ed25519_dalek;
/// # fn main() {
/// use ed25519_dalek::SecretKey;
/// use ed25519_dalek::SECRET_KEY_LENGTH;
///
/// let secret_key_bytes: [u8; SECRET_KEY_LENGTH] = [
/// 157, 097, 177, 157, 239, 253, 090, 096,
/// 186, 132, 074, 244, 146, 236, 044, 196,
/// 068, 073, 197, 105, 123, 050, 105, 025,
/// 112, 059, 172, 003, 028, 174, 127, 096, ];
///
/// let secret_key: SecretKey = SecretKey::from_bytes(&secret_key_bytes[..]);
/// # }
/// ```
///
/// # Returns
///
/// An EdDSA `SecretKey`.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> SecretKey {
SecretKey(*array_ref!(bytes, 0, SECRET_KEY_LENGTH))
}
/// Generate a `SecretKey` from a `csprng`.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate sha2;
/// extern crate ed25519_dalek;
///
/// # fn main() {
///
/// use rand::Rng;
/// use rand::OsRng;
/// use sha2::Sha512;
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::SecretKey;
/// use ed25519_dalek::Signature;
///
/// let mut csprng: OsRng = OsRng::new().unwrap();
/// let secret_key: SecretKey = SecretKey::generate(&mut csprng);
///
/// # }
/// ```
///
/// Afterwards, you can generate the corresponding public key—provided you also
/// supply a hash function which implements the `Digest` and `Default`
/// traits, and which returns 512 bits of output—via:
///
/// ```
/// # extern crate rand;
/// # extern crate sha2;
/// # extern crate ed25519_dalek;
/// #
/// # fn main() {
/// #
/// # use rand::Rng;
/// # use rand::OsRng;
/// # use sha2::Sha512;
/// # use ed25519_dalek::PublicKey;
/// # use ed25519_dalek::SecretKey;
/// # use ed25519_dalek::Signature;
/// #
/// # let mut csprng: OsRng = OsRng::new().unwrap();
/// # let secret_key: SecretKey = SecretKey::generate(&mut csprng);
///
/// let public_key: PublicKey = PublicKey::from_secret::<Sha512>(&secret_key);
/// # }
/// ```
///
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. the one returned
/// from `rand::OsRng::new()` (in the `rand` crate).
///
#[cfg(feature = "std")]
pub fn generate(csprng: &mut Rng) -> SecretKey {
let mut sk: SecretKey = SecretKey([0u8; 32]);
csprng.fill_bytes(&mut sk.0);
sk
}
}
/// An ed25519 public key.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct PublicKey(pub CompressedEdwardsY);
impl Debug for PublicKey {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "PublicKey( CompressedPoint( {:?} ))", self.0)
}
}
impl PublicKey {
/// Convert this public key to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; PUBLIC_KEY_LENGTH] {
self.0.to_bytes()
}
/// View this public key as a byte array.
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8; PUBLIC_KEY_LENGTH] {
&(self.0).0
}
/// Construct a `PublicKey` from a slice of bytes.
///
/// # Warning
///
/// The caller is responsible for ensuring that the bytes passed into this
/// method actually represent a `curve25519_dalek::curve::CompressedEdwardsY`
/// and that said compressed point is actually a point on the curve.
///
/// # Example
///
/// ```
/// # extern crate ed25519_dalek;
/// # fn main() {
/// use ed25519_dalek::PublicKey;
/// use ed25519_dalek::PUBLIC_KEY_LENGTH;
///
/// let public_key_bytes: [u8; PUBLIC_KEY_LENGTH] = [
/// 215, 90, 152, 1, 130, 177, 10, 183, 213, 75, 254, 211, 201, 100, 7, 58,
/// 14, 225, 114, 243, 218, 166, 35, 37, 175, 2, 26, 104, 247, 7, 81, 26];
///
/// let public_key: PublicKey = PublicKey::from_bytes(&public_key_bytes);
/// # }
/// ```
///
/// # Returns
///
/// A `PublicKey`.
#[inline]
pub fn from_bytes(bytes: &[u8]) -> PublicKey {
PublicKey(CompressedEdwardsY(*array_ref!(bytes, 0, 32)))
}
/// Convert this public key to its underlying extended twisted Edwards coordinate.
#[inline]
fn decompress(&self) -> Option<ExtendedPoint> {
self.0.decompress()
}
/// Derive this public key from its corresponding `SecretKey`.
#[cfg(feature = "std")]
#[allow(unused_assignments)]
pub fn from_secret<D>(secret_key: &SecretKey) -> PublicKey
where D: Digest<OutputSize = U64> + Default {
let mut h: D = D::default();
let mut hash: [u8; 64] = [0u8; 64];
let pk: [u8; 32];
let mut digest: &mut | return true;
} e | conditional_block |
|
asset.go | (name string) []byte {
s, err := afs.GetAssetFile(name)
if err != nil {
return []byte("")
}
return s.Content()
}
// GetFileNames get all file names
func (afs *assetFiles) GetFileNames(dir string) []string {
if dir == "" {
dir = "/"
}
names := make([]string, 0, len(afs.Files))
dirRaw := dir
dir = path.Clean(dir)
if dir != "/" && strings.HasSuffix(dirRaw, "/") {
dir += string(filepath.Separator)
}
dir = filepath.ToSlash(dir)
for name := range afs.Files {
if strings.HasPrefix(name, dir) {
names = append(names, name)
}
}
return names
}
// FileHandlerFunc handler http files
// If the directory name matches *private, it must not be accessible via the web
func (afs *assetFiles) FileHandlerFunc(name string) http.HandlerFunc {
if strings.Contains(name, "private/") {
return http.NotFound
}
return afs.FileHandlerFuncAll(name)
}
// FileHandlerFuncAll handler http files
// The private-directory rule is not applied
func (afs *assetFiles) FileHandlerFuncAll(name string) http.HandlerFunc {
name = filepath.ToSlash(name)
file, err := afs.GetAssetFile(name)
return func(writer http.ResponseWriter, req *http.Request) {
if err != nil {
http.NotFound(writer, req)
return
}
modifiedSince := req.Header.Get("If-Modified-Since")
if modifiedSince != "" {
t, err := time.Parse(http.TimeFormat, modifiedSince)
if err == nil && file.ModTime().Before(t) {
writer.Header().Del("Content-Type")
writer.Header().Del("Content-Length")
writer.Header().Set("Last-Modified", file.ModTime().UTC().Format(http.TimeFormat))
writer.WriteHeader(http.StatusNotModified)
return
}
}
mimeType := mime.TypeByExtension(filepath.Ext(file.Name()))
if mimeType != "" {
writer.Header().Set("Content-Type", mimeType)
}
writer.Header().Set("Last-Modified", file.ModTime().UTC().Format(http.TimeFormat))
gzipContent := file.ContentGzip()
var errWrote error
if len(gzipContent) > 0 && strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
writer.Header().Set("Content-Encoding", "gzip")
_, errWrote = writer.Write(gzipContent)
} else {
_, errWrote = writer.Write(file.Content())
}
if errWrote != nil {
log.Printf("[wf] wrote %q with error:%s\n", name, errWrote)
}
}
}
// HTTPHandler handler http request
// eg: the file on the file system is /res/js/a.js and the request path is /res/js/a.js
// http.Handle("/res/",res.Asset.HttpHandler("/"))
// eg: the file on the file system is /res/js/a.js and the request path is /js/a.js
// http.Handle("/js/",res.Asset.HttpHandler("/res/"))
func (afs *assetFiles) HTTPHandler(baseDir string) http.Handler {
return &_assetFileServer{sf: afs, pdir: baseDir}
}
type _assetFileServer struct {
sf *assetFiles
pdir string
}
// ServeHTTP ServeHTTP
func (f *_assetFileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
name := filepath.ToSlash(filepath.Join(f.pdir, r.URL.Path))
f.sf.FileHandlerFunc(name).ServeHTTP(w, r)
}
var _ AssetFiles = &assetFiles{}
var _ = flag.String
var _ = runtime.Version()
// ---------------------------helper.go--------begin--------------------------//
func newAssetHelper() *assetHelper {
helper := &assetHelper{}
helper.Regs = make(map[string]*regexp.Regexp)
helper.Regs["remove_above"] = regexp.MustCompile(`[\S\s]*?//\s*asset_remove_above\(\s*\)`)
helper.Regs["remove"] = regexp.MustCompile(`//\s*asset_remove_start\(\s*\)[\S\s]*?//\s*asset_remove_end\(\s*\)`)
helper.Regs["include"] = regexp.MustCompile(`//\s*asset_include\(([^)]+?)\)`)
helper.RegisterFn("remove_above", helper.RemoveAbove)
helper.RegisterFn("include", helper.Include)
helper.RegisterFn("remove", helper.Remove)
return helper
}
type assetHelperFn func(fileName string, content []byte) ([]byte, error)
type assetHelper struct {
Fns []map[string]assetHelperFn
Regs map[string]*regexp.Regexp
}
// RegisterFn registers a helper function
func (h *assetHelper) RegisterFn(name string, fn assetHelperFn) {
h.Fns = append(h.Fns, map[string]assetHelperFn{name: fn})
}
// Execute runs all registered helper functions
func (h *assetHelper) Execute(fileAbsPath string, content []byte, skipFnName string) (contentNew []byte, err error) {
contentNew = make([]byte, len(content))
copy(contentNew, content)
for _, fnInfo := range h.Fns {
for name, fn := range fnInfo {
if name == skipFnName {
continue
}
contentNew, err = fn(fileAbsPath, contentNew)
if err != nil {
return nil, fmt.Errorf("%s,current file is: %s", err.Error(), fileAbsPath)
}
}
}
return contentNew, nil
}
// RemoveAbove removes everything before this marker
// eg: \/\/ asset_remove_above()
func (h *assetHelper) RemoveAbove(fileAbsPath string, content []byte) (contentNew []byte, err error) {
contentNew = h.Regs["remove_above"].ReplaceAll(content, []byte(""))
return contentNew, nil
}
// Remove deletes the content between the start and end markers
// eg: \/\/asset_remove_start() the content in between is removed \/\/ asset_remove_end()
func (h *assetHelper) Remove(fileAbsPath string, content []byte) (contentNew []byte, err error) {
contentNew = h.Regs["remove"].ReplaceAll(content, []byte(""))
return contentNew, nil
}
func (h *assetHelper) include(fileAPath string, content []byte, includeFiles map[string]map[string]bool) (contentNew []byte, err error) {
fileAPath = filepath.Clean(fileAPath)
includeFiles[fileAPath] = make(map[string]bool)
contentNew = h.Regs["include"].ReplaceAllFunc(content, func(matchData []byte) []byte {
idx := bytes.Index(matchData, []byte("("))
name := bytes.TrimSpace(matchData[idx+1 : len(matchData)-1])
if len(name) == 0 {
err = fmt.Errorf("asset_include with empty param")
return []byte(err.Error())
}
fileBPath := filepath.Join(filepath.Dir(fileAPath), string(name))
if bFiles, hasB := includeFiles[fileBPath]; hasB {
if _, hasA := bFiles[fileAPath]; hasA {
err = fmt.Errorf("asset_include error: cyclic include,%s include(%s)", fileAPath, string(name))
return []byte(err.Error())
}
}
includeFiles[fileAPath][fileBPath] = true
includeFiles[fileBPath] = make(map[string]bool)
bContent, errRead := ioutil.ReadFile(fileBPath)
if errRead != nil {
err = errRead
return []byte(err.Error())
}
b1Content, errB1 := h.Execute(fileBPath, bContent, "include")
if errB1 != nil {
err = errB1
return []byte(err.Error())
}
cContent, errInclude := h.include(fileBPath, b1Content, includeFiles)
if errInclude != nil {
err = errInclude
return []byte(err.Error())
}
return cContent
})
if err != nil {
return nil, err
}
return contentNew, nil
}
// Include embeds another asset file into the current file
// eg: \/\/ asset_include(a.tpl)
func (h *assetHelper) Include(fileAPath string, content []byte) (contentNew []byte, err error) {
// used to detect cyclic includes
includeFiles := make(map[string]map[string]bool)
return h.include(fileAPath, content, includeFiles)
}
// ---------------------------helper.go--------finish-------------------------//
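// Editor's sketch (not part of the original source): typical use of the asset helper above.
// Execute applies the registered helpers (remove_above, include, remove) in order; the
// template path below is hypothetical.
//
//	helper := newAssetHelper()
//	raw, _ := ioutil.ReadFile("res/tpl/page.tpl") // may contain "// asset_include(header.tpl)" directives
//	processed, err := helper.Execute("res/tpl/page.tpl", raw, "")
//	if err != nil {
//	    log.Fatalln("asset helper failed:", err)
//	}
//	_ = processed // content with includes expanded and marked sections removed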
// Asset export assets
var Asset AssetFiles
func init() {
// nolint
var _assetGzipDecode = func(data []byte, fileName string) []byte {
gzipReader, errGzip := gzip.NewReader(bytes.NewBuffer(data))
if errGzip != nil {
panic(fmt.Sprintf("[goasset] gzip decode failed,file=%q, err=%s", fileName, errGzip.Error()))
}
defer gzipReader.Close()
buf, errReader := ioutil.ReadAll(gzipReader)
if errReader != nil {
panic(fmt.Sprintf("[goasset] read decode content failed, file=%q err=%s", fileName, errReader.Error()))
}
return buf
}
// nolint
var _assetBase64Decode = func(txt string, fileName string) []byte {
txt = strings.ReplaceAll(txt, "\n", "")
bf, err := base64.StdEncoding.DecodeString | GetContent | identifier_name |
|
asset.go |
helper := newAssetHelper()
contentNew, errHelper := helper.Execute(assetFilePath, content, "")
if errHelper != nil {
return nil, errHelper
}
return &assetFile{
content: contentNew,
name: name,
mtime: info.ModTime(),
}, nil
}
return nil, fmt.Errorf("not file")
}
if sf, has := afs.Files[name]; has {
return sf, nil
}
return nil, fmt.Errorf("not exists")
}
// GetContent get content by name
func (afs *assetFiles) GetContent(name string) []byte {
s, err := afs.GetAssetFile(name)
if err != nil {
return []byte("")
}
return s.Content()
}
// GetFileNames get all file names
func (afs *assetFiles) GetFileNames(dir string) []string {
if dir == "" {
dir = "/"
}
names := make([]string, 0, len(afs.Files))
dirRaw := dir
dir = path.Clean(dir)
if dir != "/" && strings.HasSuffix(dirRaw, "/") {
dir += string(filepath.Separator)
}
dir = filepath.ToSlash(dir)
for name := range afs.Files {
if strings.HasPrefix(name, dir) {
names = append(names, name)
}
}
return names
}
// FileHandlerFunc handler http files
// If the directory name matches *private, it must not be accessible via the web
func (afs *assetFiles) FileHandlerFunc(name string) http.HandlerFunc {
if strings.Contains(name, "private/") {
return http.NotFound
}
return afs.FileHandlerFuncAll(name)
}
// FileHandlerFuncAll handler http files
// The private-directory rule is not applied
func (afs *assetFiles) FileHandlerFuncAll(name string) http.HandlerFunc {
name = filepath.ToSlash(name)
file, err := afs.GetAssetFile(name)
return func(writer http.ResponseWriter, req *http.Request) {
if err != nil {
http.NotFound(writer, req)
return
}
modifiedSince := req.Header.Get("If-Modified-Since")
if modifiedSince != "" {
t, err := time.Parse(http.TimeFormat, modifiedSince)
if err == nil && file.ModTime().Before(t) {
writer.Header().Del("Content-Type")
writer.Header().Del("Content-Length")
writer.Header().Set("Last-Modified", file.ModTime().UTC().Format(http.TimeFormat))
writer.WriteHeader(http.StatusNotModified)
return
}
}
mimeType := mime.TypeByExtension(filepath.Ext(file.Name()))
if mimeType != "" {
writer.Header().Set("Content-Type", mimeType)
}
writer.Header().Set("Last-Modified", file.ModTime().UTC().Format(http.TimeFormat))
gzipContent := file.ContentGzip()
var errWrote error
if len(gzipContent) > 0 && strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
writer.Header().Set("Content-Encoding", "gzip")
_, errWrote = writer.Write(gzipContent)
} else {
_, errWrote = writer.Write(file.Content())
}
if errWrote != nil {
log.Printf("[wf] wrote %q with error:%s\n", name, errWrote)
}
}
}
// HTTPHandler handler http request
// eg: the file on the file system is /res/js/a.js and the request path is /res/js/a.js
// http.Handle("/res/",res.Asset.HttpHandler("/"))
// eg: the file on the file system is /res/js/a.js and the request path is /js/a.js
// http.Handle("/js/",res.Asset.HttpHandler("/res/"))
func (afs *assetFiles) HTTPHandler(baseDir string) http.Handler {
return &_assetFileServer{sf: afs, pdir: baseDir}
}
type _assetFileServer struct {
sf *assetFiles
pdir string
}
// ServeHTTP ServeHTTP
func (f *_assetFileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
name := filepath.ToSlash(filepath.Join(f.pdir, r.URL.Path))
f.sf.FileHandlerFunc(name).ServeHTTP(w, r)
}
var _ AssetFiles = &assetFiles{}
var _ = flag.String
var _ = runtime.Version()
// ---------------------------helper.go--------begin--------------------------//
func newAssetHelper() *assetHelper {
helper := &assetHelper{}
helper.Regs = make(map[string]*regexp.Regexp)
helper.Regs["remove_above"] = regexp.MustCompile(`[\S\s]*?//\s*asset_remove_above\(\s*\)`)
helper.Regs["remove"] = regexp.MustCompile(`//\s*asset_remove_start\(\s*\)[\S\s]*?//\s*asset_remove_end\(\s*\)`)
helper.Regs["include"] = regexp.MustCompile(`//\s*asset_include\(([^)]+?)\)`)
helper.RegisterFn("remove_above", helper.RemoveAbove)
helper.RegisterFn("include", helper.Include)
helper.RegisterFn("remove", helper.Remove)
return helper
}
type assetHelperFn func(fileName string, content []byte) ([]byte, error)
type assetHelper struct {
Fns []map[string]assetHelperFn
Regs map[string]*regexp.Regexp
}
// RegisterFn registers a helper function
func (h *assetHelper) RegisterFn(name string, fn assetHelperFn) {
h.Fns = append(h.Fns, map[string]assetHelperFn{name: fn})
}
// Execute runs all registered helper functions
func (h *assetHelper) Execute(fileAbsPath string, content []byte, skipFnName string) (contentNew []byte, err error) {
contentNew = make([]byte, len(content))
copy(contentNew, content)
for _, fnInfo := range h.Fns {
for name, fn := range fnInfo {
if name == skipFnName {
continue
}
contentNew, err = fn(fileAbsPath, contentNew)
if err != nil {
return nil, fmt.Errorf("%s,current file is: %s", err.Error(), fileAbsPath)
}
}
}
return contentNew, nil
}
// RemoveAbove removes everything before this marker
// eg: \/\/ asset_remove_above()
func (h *assetHelper) RemoveAbove(fileAbsPath string, content []byte) (contentNew []byte, err error) {
contentNew = h.Regs["remove_above"].ReplaceAll(content, []byte(""))
return contentNew, nil
}
// Remove deletes the content between the start and end markers
// eg: \/\/asset_remove_start() the content in between is removed \/\/ asset_remove_end()
func (h *assetHelper) Remove(fileAbsPath string, content []byte) (contentNew []byte, err error) {
contentNew = h.Regs["remove"].ReplaceAll(content, []byte(""))
return contentNew, nil
}
func (h *assetHelper) include(fileAPath string, content []byte, includeFiles map[string]map[string]bool) (contentNew []byte, err error) {
fileAPath = filepath.Clean(fileAPath)
includeFiles[fileAPath] = make(map[string]bool)
contentNew = h.Regs["include"].ReplaceAllFunc(content, func(matchData []byte) []byte {
idx := bytes.Index(matchData, []byte("("))
name := bytes.TrimSpace(matchData[idx+1 : len(matchData)-1])
if len(name) == 0 {
err = fmt.Errorf("asset_include with empty param")
return []byte(err.Error())
}
fileBPath := filepath.Join(filepath.Dir(fileAPath), string(name))
if bFiles, hasB := includeFiles[fileBPath]; hasB {
if _, hasA := bFiles[fileAPath]; hasA {
err = fmt.Errorf("asset_include error: cyclic include,%s include(%s)", fileAPath, string(name))
return []byte(err.Error())
}
}
includeFiles[fileAPath][fileBPath] = true
includeFiles[fileBPath] = make(map[string]bool)
bContent, errRead := ioutil.ReadFile(fileBPath)
if errRead != nil {
err = errRead
return []byte(err.Error())
}
b1Content, errB1 := h.Execute(fileBPath, bContent, "include")
if errB1 != nil {
err = errB1
return []byte(err.Error())
}
cContent, errInclude := h.include(fileBPath, b1Content, includeFiles)
if errInclude != nil {
err = errInclude
return []byte(err.Error())
}
return cContent
})
if err != nil {
return nil, err
}
return contentNew, nil
}
// Include embeds another asset file into the current file
// eg: \/\/ asset_include(a.tpl)
func (h *assetHelper) Include(fileAPath string, content []byte) (contentNew []byte, err error) {
// used to detect cyclic includes
includeFiles := make(map[string]map[string]bool)
return h.include(fileAPath, content, includeFiles)
}
// ---------------------------helper.go--------finish-------------------------//
// Asset export assets
var Asset AssetFiles
func init() {
// nolint
var _assetGzipDecode = func(data []byte, fileName string) []byte {
gzipReader, errGzip := gzip.NewReader(bytes.NewBuffer(data))
if errG | {
return nil, err
} | conditional_block |
|
asset.go | dir = "/"
}
names := make([]string, 0, len(afs.Files))
dirRaw := dir
dir = path.Clean(dir)
if dir != "/" && strings.HasSuffix(dirRaw, "/") {
dir += string(filepath.Separator)
}
dir = filepath.ToSlash(dir)
for name := range afs.Files {
if strings.HasPrefix(name, dir) {
names = append(names, name)
}
}
return names
}
// FileHandlerFunc handler http files
// If the directory name matches *private, it must not be accessible via the web
func (afs *assetFiles) FileHandlerFunc(name string) http.HandlerFunc {
if strings.Contains(name, "private/") {
return http.NotFound
}
return afs.FileHandlerFuncAll(name)
}
// FileHandlerFuncAll handler http files
// The private-directory rule is not applied
func (afs *assetFiles) FileHandlerFuncAll(name string) http.HandlerFunc {
name = filepath.ToSlash(name)
file, err := afs.GetAssetFile(name)
return func(writer http.ResponseWriter, req *http.Request) {
if err != nil {
http.NotFound(writer, req)
return
}
modifiedSince := req.Header.Get("If-Modified-Since")
if modifiedSince != "" {
t, err := time.Parse(http.TimeFormat, modifiedSince)
if err == nil && file.ModTime().Before(t) {
writer.Header().Del("Content-Type")
writer.Header().Del("Content-Length")
writer.Header().Set("Last-Modified", file.ModTime().UTC().Format(http.TimeFormat))
writer.WriteHeader(http.StatusNotModified)
return
}
}
mimeType := mime.TypeByExtension(filepath.Ext(file.Name()))
if mimeType != "" {
writer.Header().Set("Content-Type", mimeType)
}
writer.Header().Set("Last-Modified", file.ModTime().UTC().Format(http.TimeFormat))
gzipContent := file.ContentGzip()
var errWrote error
if len(gzipContent) > 0 && strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
writer.Header().Set("Content-Encoding", "gzip")
_, errWrote = writer.Write(gzipContent)
} else {
_, errWrote = writer.Write(file.Content())
}
if errWrote != nil {
log.Printf("[wf] wrote %q with error:%s\n", name, errWrote)
}
}
}
// HTTPHandler handler http request
// eg: the file on the file system is /res/js/a.js and the request path is /res/js/a.js
// http.Handle("/res/",res.Asset.HttpHandler("/"))
// eg: the file on the file system is /res/js/a.js and the request path is /js/a.js
// http.Handle("/js/",res.Asset.HttpHandler("/res/"))
func (afs *assetFiles) HTTPHandler(baseDir string) http.Handler {
return &_assetFileServer{sf: afs, pdir: baseDir}
}
type _assetFileServer struct {
sf *assetFiles
pdir string
}
// ServeHTTP ServeHTTP
func (f *_assetFileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
name := filepath.ToSlash(filepath.J | r _ = flag.String
var _ = runtime.Version()
// ---------------------------helper.go--------begin--------------------------//
func newAssetHelper() *assetHelper {
helper := &assetHelper{}
helper.Regs = make(map[string]*regexp.Regexp)
helper.Regs["remove_above"] = regexp.MustCompile(`[\S\s]*?//\s*asset_remove_above\(\s*\)`)
helper.Regs["remove"] = regexp.MustCompile(`//\s*asset_remove_start\(\s*\)[\S\s]*?//\s*asset_remove_end\(\s*\)`)
helper.Regs["include"] = regexp.MustCompile(`//\s*asset_include\(([^)]+?)\)`)
helper.RegisterFn("remove_above", helper.RemoveAbove)
helper.RegisterFn("include", helper.Include)
helper.RegisterFn("remove", helper.Remove)
return helper
}
type assetHelperFn func(fileName string, content []byte) ([]byte, error)
type assetHelper struct {
Fns []map[string]assetHelperFn
Regs map[string]*regexp.Regexp
}
// RegisterFn registers a helper function
func (h *assetHelper) RegisterFn(name string, fn assetHelperFn) {
h.Fns = append(h.Fns, map[string]assetHelperFn{name: fn})
}
// Execute runs all registered helper functions, skipping the one named by skipFnName
func (h *assetHelper) Execute(fileAbsPath string, content []byte, skipFnName string) (contentNew []byte, err error) {
contentNew = make([]byte, len(content))
copy(contentNew, content)
for _, fnInfo := range h.Fns {
for name, fn := range fnInfo {
if name == skipFnName {
continue
}
contentNew, err = fn(fileAbsPath, contentNew)
if err != nil {
return nil, fmt.Errorf("%s,current file is: %s", err.Error(), fileAbsPath)
}
}
}
return contentNew, nil
}
// RemoveAbove removes all content before this marker
// eg: \/\/ asset_remove_above()
func (h *assetHelper) RemoveAbove(fileAbsPath string, content []byte) (contentNew []byte, err error) {
contentNew = h.Regs["remove_above"].ReplaceAll(content, []byte(""))
return contentNew, nil
}
// Remove deletes the content between the start and end markers
// eg: \/\/asset_remove_start() the content in between is removed \/\/ asset_remove_end()
func (h *assetHelper) Remove(fileAbsPath string, content []byte) (contentNew []byte, err error) {
contentNew = h.Regs["remove"].ReplaceAll(content, []byte(""))
return contentNew, nil
}
func (h *assetHelper) include(fileAPath string, content []byte, includeFiles map[string]map[string]bool) (contentNew []byte, err error) {
fileAPath = filepath.Clean(fileAPath)
includeFiles[fileAPath] = make(map[string]bool)
contentNew = h.Regs["include"].ReplaceAllFunc(content, func(matchData []byte) []byte {
idx := bytes.Index(matchData, []byte("("))
name := bytes.TrimSpace(matchData[idx+1 : len(matchData)-1])
if len(name) == 0 {
err = fmt.Errorf("asset_include with empty param")
return []byte(err.Error())
}
fileBPath := filepath.Join(filepath.Dir(fileAPath), string(name))
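// detect include cycles: if the included file already includes the current one, abort with an error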
if bFiles, hasB := includeFiles[fileBPath]; hasB {
if _, hasA := bFiles[fileAPath]; hasA {
err = fmt.Errorf("asset_include error: cyclic include,%s include(%s)", fileAPath, string(name))
return []byte(err.Error())
}
}
includeFiles[fileAPath][fileBPath] = true
includeFiles[fileBPath] = make(map[string]bool)
bContent, errRead := ioutil.ReadFile(fileBPath)
if errRead != nil {
err = errRead
return []byte(err.Error())
}
b1Content, errB1 := h.Execute(fileBPath, bContent, "include")
if errB1 != nil {
err = errB1
return []byte(err.Error())
}
cContent, errInclude := h.include(fileBPath, b1Content, includeFiles)
if errInclude != nil {
err = errInclude
return []byte(err.Error())
}
return cContent
})
if err != nil {
return nil, err
}
return contentNew, nil
}
// Include includes another resource file into the current file
// eg: \/\/ asset_include(a.tpl)
func (h *assetHelper) Include(fileAPath string, content []byte) (contentNew []byte, err error) {
// used to detect cyclic includes
includeFiles := make(map[string]map[string]bool)
return h.include(fileAPath, content, includeFiles)
}
// ---------------------------helper.go--------finish-------------------------//
// Asset exports the embedded asset files
var Asset AssetFiles
func init() {
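// _assetGzipDecode decompresses the gzip-compressed payload of an embedded file; it panics on corrupt data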
// nolint
var _assetGzipDecode = func(data []byte, fileName string) []byte {
gzipReader, errGzip := gzip.NewReader(bytes.NewBuffer(data))
if errGzip != nil {
panic(fmt.Sprintf("[goasset] gzip decode failed,file=%q, err=%s", fileName, errGzip.Error()))
}
defer gzipReader.Close()
buf, errReader := ioutil.ReadAll(gzipReader)
if errReader != nil {
panic(fmt.Sprintf("[goasset] read decode content failed, file=%q err=%s", fileName, errReader.Error()))
}
return buf
}
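// _assetBase64Decode strips newlines from the embedded base64 text and decodes it, panicking on malformed input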
// nolint
var _assetBase64Decode = func(txt string, fileName string) []byte {
txt = strings.ReplaceAll(txt, "\n", "")
bf, err := base64.StdEncoding.DecodeString(txt)
if err != nil {
panic(fmt.Sprintf("[goasset](%s) base64 decode failed, err=%s", fileName, err.Error()))
}
return bf
}
asset := &assetFiles{Files: map[string]*assetFile{}}
Asset = asset | oin(f.pdir, r.URL.Path))
f.sf.FileHandlerFunc(name).ServeHTTP(w, r)
}
var _ AssetFiles = &assetFiles{}
va | identifier_body |
asset.go |
// assetFiles asset files
type assetFiles struct {
Files map[string]*assetFile
}
var _assetDirect bool
var _assetCwd, _ = os.Getwd()
// GetAssetFile gets a file by name
func (afs *assetFiles) GetAssetFile(name string) (AssetFile, error) {
name = filepath.ToSlash(name)
if name != "" && name[0] != '/' {
name = "/" + name
}
if _assetDirect {
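// direct mode: read the file from the working directory and apply the asset helpers to it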
assetFilePath := filepath.Join(_assetCwd, name)
f, err := os.Open(assetFilePath)
log.Println("[goasset] Asset Direct, name=", name, "assetPath=", assetFilePath, "err=", err)
if err != nil {
return nil, err
}
defer f.Close()
info, err := f.Stat()
if err != nil {
return nil, err
}
if info.Mode().IsRegular() {
content, err := ioutil.ReadAll(f)
if err != nil {
return nil, err
}
helper := newAssetHelper()
contentNew, errHelper := helper.Execute(assetFilePath, content, "")
if errHelper != nil {
return nil, errHelper
}
return &assetFile{
content: contentNew,
name: name,
mtime: info.ModTime(),
}, nil
}
return nil, fmt.Errorf("not file")
}
if sf, has := afs.Files[name]; has {
return sf, nil
}
return nil, fmt.Errorf("not exists")
}
// GetContent gets a file's content by name
func (afs *assetFiles) GetContent(name string) []byte {
s, err := afs.GetAssetFile(name)
if err != nil {
return []byte("")
}
return s.Content()
}
// GetFileNames gets all file names under dir
func (afs *assetFiles) GetFileNames(dir string) []string {
if dir == "" {
dir = "/"
}
names := make([]string, 0, len(afs.Files))
dirRaw := dir
dir = path.Clean(dir)
if dir != "/" && strings.HasSuffix(dirRaw, "/") {
dir += string(filepath.Separator)
}
dir = filepath.ToSlash(dir)
for name := range afs.Files {
if strings.HasPrefix(name, dir) {
names = append(names, name)
}
}
return names
}
// FileHandlerFunc handles http requests for a single file
// directories named *private are not accessible via the web
func (afs *assetFiles) FileHandlerFunc(name string) http.HandlerFunc {
if strings.Contains(name, "private/") {
return http.NotFound
}
return afs.FileHandlerFuncAll(name)
}
// FileHandlerFuncAll handles http requests for a single file
// without applying the private directory rule
func (afs *assetFiles) FileHandlerFuncAll(name string) http.HandlerFunc {
name = filepath.ToSlash(name)
file, err := afs.GetAssetFile(name)
return func(writer http.ResponseWriter, req *http.Request) {
if err != nil {
http.NotFound(writer, req)
return
}
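// answer conditional requests with 304 Not Modified when the asset has not changed since the client's cached copy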
modifiedSince := req.Header.Get("If-Modified-Since")
if modifiedSince != "" {
t, err := time.Parse(http.TimeFormat, modifiedSince)
if err == nil && file.ModTime().Before(t) {
writer.Header().Del("Content-Type")
writer.Header().Del("Content-Length")
writer.Header().Set("Last-Modified", file.ModTime().UTC().Format(http.TimeFormat))
writer.WriteHeader(http.StatusNotModified)
return
}
}
mimeType := mime.TypeByExtension(filepath.Ext(file.Name()))
if mimeType != "" {
writer.Header().Set("Content-Type", mimeType)
}
writer.Header().Set("Last-Modified", file.ModTime().UTC().Format(http.TimeFormat))
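// serve the pre-compressed bytes when the client accepts gzip; otherwise write the plain content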
gzipContent := file.ContentGzip()
var errWrote error
if len(gzipContent) > 0 && strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
writer.Header().Set("Content-Encoding", "gzip")
_, errWrote = writer.Write(gzipContent)
} else {
_, errWrote = writer.Write(file.Content())
}
if errWrote != nil {
log.Printf("[wf] wrote %q with error:%s\n", name, errWrote)
}
}
}
// HTTPHandler handles http requests for the embedded assets
// eg: the file on disk is /res/js/a.js and the request path is /res/js/a.js:
// http.Handle("/res/", res.Asset.HTTPHandler("/"))
// eg: the file on disk is /res/js/a.js and the request path is /js/a.js:
// http.Handle("/js/", res.Asset.HTTPHandler("/res/"))
func (afs *assetFiles) HTTPHandler(baseDir string) http.Handler {
return &_assetFileServer{sf: afs, pdir: baseDir}
}
type _assetFileServer struct {
sf *assetFiles
pdir string
}
// ServeHTTP implements http.Handler
func (f *_assetFileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
name := filepath.ToSlash(filepath.Join(f.pdir, r.URL.Path))
f.sf.FileHandlerFunc(name).ServeHTTP(w, r)
}
var _ AssetFiles = &assetFiles{}
var _ = flag.String
var _ = runtime.Version()
// ---------------------------helper.go--------begin--------------------------//
func newAssetHelper() *assetHelper {
helper := &assetHelper{}
helper.Regs = make(map[string]*regexp.Regexp)
helper.Regs["remove_above"] = regexp.MustCompile(`[\S\s]*?//\s*asset_remove_above\(\s*\)`)
helper.Regs["remove"] = regexp.MustCompile(`//\s*asset_remove_start\(\s*\)[\S\s]*?//\s*asset_remove_end\(\s*\)`)
helper.Regs["include"] = regexp.MustCompile(`//\s*asset_include\(([^)]+?)\)`)
helper.RegisterFn("remove_above", helper.RemoveAbove)
helper.RegisterFn("include", helper.Include)
helper.RegisterFn("remove", helper.Remove)
return helper
}
type assetHelperFn func(fileName string, content []byte) ([]byte, error)
type assetHelper struct {
Fns []map[string]assetHelperFn
Regs map[string]*regexp.Regexp
}
// RegisterFn registers a helper function
func (h *assetHelper) RegisterFn(name string, fn assetHelperFn) {
h.Fns = append(h.Fns, map[string]assetHelperFn{name: fn})
}
// Execute runs all registered helper functions, skipping the one named by skipFnName
func (h *assetHelper) Execute(fileAbsPath string, content []byte, skipFnName string) (contentNew []byte, err error) {
contentNew = make([]byte, len(content))
copy(contentNew, content)
for _, fnInfo := range h.Fns {
for name, fn := range fnInfo {
if name == skipFnName {
continue
}
contentNew, err = fn(fileAbsPath, contentNew)
if err != nil {
return nil, fmt.Errorf("%s,current file is: %s", err.Error(), fileAbsPath)
}
}
}
return contentNew, nil
}
// RemoveAbove removes all content before this marker
// eg: \/\/ asset_remove_above()
func (h *assetHelper) RemoveAbove(fileAbsPath string, content []byte) (contentNew []byte, err error) {
contentNew = h.Regs["remove_above"].ReplaceAll(content, []byte(""))
return contentNew, nil
}
// Remove deletes the content between the start and end markers
// eg: \/\/asset_remove_start() the content in between is removed \/\/ asset_remove_end()
func (h *assetHelper) Remove(fileAbsPath string, content []byte) (contentNew []byte, err error) {
contentNew = h.Regs["remove"].ReplaceAll(content, []byte(""))
return contentNew, nil
}
func (h *assetHelper) include(fileAPath string, content []byte, includeFiles map[string]map[string]bool) (contentNew []byte, err error) {
fileAPath = filepath.Clean(fileAPath)
includeFiles[fileAPath] = make(map[string]bool)
contentNew = h.Regs["include"].ReplaceAllFunc(content, func(matchData []byte) []byte {
idx := bytes.Index(matchData, []byte("("))
name := bytes.TrimSpace(matchData[idx+1 : len(matchData)-1])
if len(name) == 0 {
err = fmt.Errorf("asset_include with empty param")
return []byte(err.Error())
}
fileBPath := filepath.Join(filepath.Dir(fileAPath), string(name))
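// detect include cycles: if the included file already includes the current one, abort with an error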
if bFiles, hasB := includeFiles[fileBPath]; hasB {
if _, hasA := bFiles[fileAPath]; hasA {
err = fmt.Errorf("asset_include error: cyclic include,%s include(%s)", fileAPath, string(name))
return []byte(err.Error())
}
}
includeFiles[fileAPath][fileBPath] = true
includeFiles[fileBPath] = make(map[string]bool)
bContent, errRead := ioutil.ReadFile(fileBPath)
if errRead != nil {
err = errRead
return []byte(err.Error())
}
b1Content, errB1 := h.Execute(fileBPath, bContent, "include")
if errB1 != nil {
err = errB1
return []byte(err.Error())
}
cContent, | random_line_split |
||
Measurements.py | "related" objects to be augmented
index (str): token index of related word
connector (str): if the related word is a cousin of the unit (not a sibling) then connector is the word between them
Returns:
list: all related words for a given measurement (augmented with new 'related' passed in)
"""
doc = {}
doc["relationForm"] = dep
doc["rawName"] = related
doc["tokenIndex"] = int(index)
doc["offsetStart"] = A.lookup[int(index)]["start"]
doc["offsetEnd"] = A.lookup[int(index)]["end"]
doc["connector"] = "" if connector is None else connector
if not doc in all_related:
all_related.append(doc)
return all_related
def | (related):
"""For related words found for a measurement (usually nouns), add any connected adjectives, compounds, or modifiers.
Args:
related (list): objects containing related words and their metadata
Returns:
list: original list of related objects augmented with additional descriptor words
"""
for r in related:
r["descriptors"] = []
for edge in G.edges(data=True):
sibling_idx = _get_connected(edge, r["tokenIndex"])
if sibling_idx and (A.lookup[int(sibling_idx)]["pos"] == "JJ" or edge[2]["dep"] in ["amod", "compound"]):
r["descriptors"].append(
{
"tokenIndex": sibling_idx,
"rawName": A.lookup[int(sibling_idx)]["word"]
}
)
if sibling_idx and "NN" in A.lookup[int(sibling_idx)]["pos"] and "amod" in edge[2]["dep"]:
additional_related = _get_cousin(sibling_idx, ["nmod"])
for add in set(additional_related):
related = _add_related(add, "nmod", related, A.index_lookup[add],
connector=G.nodes[sibling_idx]['word'])
return related
def _check_criteria(dep, dep_obj, all_related, edge, sibling_idx):
""" If measurement is found, runs processed sentence through valid dependency patterns
(from JSON file) to find additional words related to measurements
Args:
dep (str): dependency type present in dependency patterns JSON
dep_obj (dict): object containing accepted POS types and measurement formats for a given dependency type
all_related (list): contains words related to a measurement to be augmented if valid pattern is found
edge (list): Connected node (word) indices and dependency type between
sibling_idx (str): token index of word connected to unit
Returns:
list: related words and metadata
"""
# Check for a matching dependency type
related = []
if edge[2]["dep"] == dep:
# Check for matching POS type(s)
for pos_logic in dep_obj.keys():
connector = None
if isinstance(dep_obj[pos_logic], dict):
for pos in dep_obj[pos_logic].keys():
# Check for allowed part of speech tags in matched dependency patterns
if (pos_logic == "pos_in" and pos in G.nodes[sibling_idx]["pos"]) or (
pos_logic == "pos_equals" and pos == G.nodes[sibling_idx]["pos"]):
pass
elif pos_logic == "pos_not":
if not [False if not_pos == G.nodes[sibling_idx]["pos"] else True for not_pos in
dep_obj.keys()]: continue
else:
continue
# if no additional checks, have a match
if dep_obj[pos_logic][pos] == None or any(
y in dep_obj[pos_logic][pos] for y in [None, "add_sibling"]):
all_related = _add_related(G.nodes[sibling_idx]['word'], dep, all_related,
A.index_lookup[G.nodes[sibling_idx]['word']])
# if additional checks are required, process further
if dep_obj[pos_logic][pos]:
if "get_cousin" in dep_obj[pos_logic][pos]:
related.extend(_get_cousin(sibling_idx, dep_obj[pos_logic][pos]["get_cousin"]))
connector = G.nodes[sibling_idx]['word']
if "special" in dep_obj[pos_logic][pos]:
if dep == "compound" and pos == "NN":
related = [G.nodes[sibling_idx]['word']]
if None in related:
related.remove(None)
# Allows for getting cousin and returning sibling
if "else" in dep_obj[pos_logic][pos].keys() and dep_obj[pos_logic][pos]["else"] == "always":
all_related = _add_related(G.nodes[sibling_idx]['word'], dep, all_related,
A.index_lookup[G.nodes[sibling_idx]['word']], connector=connector)
if len(related) > 0 and isinstance(related, list):
for x in related:
if x != None:
all_related = _add_related(x, dep, all_related, A.index_lookup[x],
connector=connector)
elif "else" in dep_obj[pos_logic][pos].keys() and dep_obj[pos_logic][pos]["else"] == True:
all_related = _add_related(G.nodes[sibling_idx]['word'], dep, all_related,
A.index_lookup[G.nodes[sibling_idx]['word']], connector=connector)
return all_related
def _parse_patterns(unit_idx, measurement_format, patterns_file):
""" Loads dependency patterns JSON file and uses "_check_criteria" to look for words related to a measurement (connected via unit token)
Args:
unit_idx (list): index or indices of measurement unit token(s)
measurement_format (str): indicates form of measurement value + unit (attached: 10m, space between: 10 m, hyphenated: 10-m)
Returns:
list: related words and metadata
"""
all_related = []
for edge in G.edges(data=True):
for idx in unit_idx:
sibling_idx = _get_connected(edge, idx)
if sibling_idx:
with open(os.path.join(basedir, patterns_file), "r") as tree:
tree = json.load(tree)
for dep in tree["dep"].keys():
if tree["dep"][dep]["enhanced"] == True:
for inner_dep in tree["dep"][dep].keys():
if isinstance(tree["dep"][dep][inner_dep], dict) and measurement_format in \
tree["dep"][dep][inner_dep]["measurement_types"]:
full_dep = dep + ":" + inner_dep
full_dep_obj = tree["dep"][dep][inner_dep]
all_related = _check_criteria(full_dep, full_dep_obj, all_related, edge,
sibling_idx)
elif measurement_format in tree["dep"][dep]["measurement_types"]:
all_related = _check_criteria(dep, tree["dep"][dep], all_related, edge, sibling_idx)
for x in range(0, len(tree["word"]["or"])):
if G.nodes[sibling_idx]["word"] == tree["word"]["or"][x]:
related = _get_cousin(sibling_idx, ["nsubj"])
for r in related:
all_related = _add_related(r, "operator", all_related, A.index_lookup[r])
all_related = _add_descriptors(all_related)
return all_related
def _get_related(stats, match, patterns_file):
""" Calls _parse_patterns() to get words related to a measurement and provides de-duplication between related words and grobid response
Args:
stats (Stats object): Global object used to track parsing behaviors
match (dict): information on measurements and units extracted by Grobid
Returns:
list: related words and metadata
"""
all_related = None
measurement_formats = ["space_between", "attached", "hyphenated"]
all_related = _parse_patterns(match["unit_idx"], match["measurement_format"], patterns_file)
if all_related == None:
all_related = _parse_patterns(match["unit_idx"], ["uncertain"], patterns_file)
# get words like approximately
num_adverbs = _parse_patterns([match["num_idx"]], match["measurement_format"], patterns_file)
unit_adverbs = _parse_patterns([match["unit_idx"]], match["measurement_format"], patterns_file)
adverbs = num_adverbs + unit_adverbs
for_removal = []
for a in adverbs:
if a["relationForm"] != "advmod":
for_removal.append(a)
else:
[a.pop(key, None) for key in ["descriptors", "connector"]] # not relevant for adverbs
[adverbs.remove(a) for a in for_removal]
if adverbs:
match["grobid"]["adverbs"] = adverbs
# Check to make sure related isn't already a number, unit, or quantified thing identified by Grobid
potential_keys = ["quantity", "quantityLeast", "quantityMost", "quantified"]
if all_related:
for key in potential_keys:
for related in all_related:
if key in match["grobid"]:
num, unit, quantified = "", "", ""
if "rawValue" in match["grobid"][key]: num = match["grobid"][key]["rawValue"]
if "rawUnit" in match["grobid"][key]: unit = match["grobid"][key]["rawUnit"]["name"]
if "normalizedName" in match["grobid"][key]: quantified = match["grobid"][key]["normalizedName"]
if related | _add_descriptors | identifier_name |
Measurements.py |
def _get_cousin(sibling_idx, dep_type_list, visited_nodes={}):
"""Find a second degree relation within the dependency graph.
Used to find subject in a sentence when the measurement unit is a direct object, for example.
Args:
sibling_idx (str): Token index of the sibling node through which to find the cousin
dep_type_list (list): Allowable dependency types connecting sibling to cousin
Returns:
list: cousin words meeting POS and dependency criteria
"""
words = [] # Visited nodes prevent recursion from bouncing between two "VB" nodes
for dep_type in dep_type_list:
for edge in G.edges(data=True):
cousin_idx = _get_connected(edge, sibling_idx)
allowed_pos = ["NN", "PR"]
if cousin_idx and dep_type in edge[2]['dep'] and any(
x in A.lookup[int(cousin_idx)]['pos'] for x in allowed_pos):
words.append(G.nodes[cousin_idx]['word'])
# Go to second cousin if cousin is a verb
elif cousin_idx and dep_type in edge[2]['dep'] and "VB" in A.lookup[int(cousin_idx)]['pos'] and (
not cousin_idx in visited_nodes or visited_nodes[cousin_idx] < 2):
words.extend(_get_cousin(cousin_idx, ["nsubj", "nsubjpass", "acl"], visited_nodes=visited_nodes))
if cousin_idx:
if cousin_idx in visited_nodes:
visited_nodes[cousin_idx] += 1
else:
visited_nodes[cousin_idx] = 1
return set(words)
def _add_related(related, dep, all_related, index, connector=None):
"""Adds a word (and its metadata) related to a measurement to the list of all related words for that measurement
Args:
related (str): related token/word
dep (str): the dependency type connecting the unit to the related word
all_related (list): existing list of "related" objects to be augmented
index (str): token index of related word
connector (str): if the related word is a cousin of the unit (not a sibling) then connector is the word between them
Returns:
list: all related words for a given measurement (augmented with new 'related' passed in)
"""
doc = {}
doc["relationForm"] = dep
doc["rawName"] = related
doc["tokenIndex"] = int(index)
doc["offsetStart"] = A.lookup[int(index)]["start"]
doc["offsetEnd"] = A.lookup[int(index)]["end"]
doc["connector"] = "" if connector is None else connector
if not doc in all_related:
all_related.append(doc)
return all_related
def _add_descriptors(related):
"""For related words found for a measurement (usually nouns), add any connected adjectives, compounds, or modifiers.
Args:
related (list): objects containing related words and their metadata
Returns:
list: original list of related objects augmented with additional descriptor words
"""
for r in related:
r["descriptors"] = []
for edge in G.edges(data=True):
sibling_idx = _get_connected(edge, r["tokenIndex"])
if sibling_idx and (A.lookup[int(sibling_idx)]["pos"] == "JJ" or edge[2]["dep"] in ["amod", "compound"]):
r["descriptors"].append(
{
"tokenIndex": sibling_idx,
"rawName": A.lookup[int(sibling_idx)]["word"]
}
)
if sibling_idx and "NN" in A.lookup[int(sibling_idx)]["pos"] and "amod" in edge[2]["dep"]:
additional_related = _get_cousin(sibling_idx, ["nmod"])
for add in set(additional_related):
related = _add_related(add, "nmod", related, A.index_lookup[add],
connector=G.nodes[sibling_idx]['word'])
return related
def _check_criteria(dep, dep_obj, all_related, edge, sibling_idx):
""" If measurement is found, runs processed sentence through valid dependency patterns
(from JSON file) to find additional words related to measurements
Args:
dep (str): dependency type present in dependency patterns JSON
dep_obj (dict): object containing accepted POS types and measurement formats for a given dependency type
all_related (list): contains words related to a measurement to be augmented if valid pattern is found
edge (list): Connected node (word) indices and dependency type between
sibling_idx (str): token index of word connected to unit
Returns:
list: related words and metadata
"""
# Check for a matching dependency type
related = []
if edge[2]["dep"] == dep:
# Check for matching POS type(s)
for pos_logic in dep_obj.keys():
connector = None
if isinstance(dep_obj[pos_logic], dict):
for pos in dep_obj[pos_logic].keys():
# Check for allowed part of speech tags in matched dependency patterns
if (pos_logic == "pos_in" and pos in G.nodes[sibling_idx]["pos"]) or (
pos_logic == "pos_equals" and pos == G.nodes[sibling_idx]["pos"]):
pass
elif pos_logic == "pos_not":
if not [False if not_pos == G.nodes[sibling_idx]["pos"] else True for not_pos in
dep_obj.keys()]: continue
else:
continue
# if no additional checks, have a match
if dep_obj[pos_logic][pos] == None or any(
y in dep_obj[pos_logic][pos] for y in [None, "add_sibling"]):
all_related = _add_related(G.nodes[sibling_idx]['word'], dep, all_related,
A.index_lookup[G.nodes[sibling_idx]['word']])
# if additional checks are required, process further
if dep_obj[pos_logic][pos]:
if "get_cousin" in dep_obj[pos_logic][pos]:
related.extend(_get_cousin(sibling_idx, dep_obj[pos_logic][pos]["get_cousin"]))
connector = G.nodes[sibling_idx]['word']
if "special" in dep_obj[pos_logic][pos]:
if dep == "compound" and pos == "NN":
related = [G.nodes[sibling_idx]['word']]
if None in related:
related.remove(None)
# Allows for getting cousin and returning sibling
if "else" in dep_obj[pos_logic][pos].keys() and dep_obj[pos_logic][pos]["else"] == "always":
all_related = _add_related(G.nodes[sibling_idx]['word'], dep, all_related,
A.index_lookup[G.nodes[sibling_idx]['word']], connector=connector)
if len(related) > 0 and isinstance(related, list):
for x in related:
if x != None:
all_related = _add_related(x, dep, all_related, A.index_lookup[x],
connector=connector)
elif "else" in dep_obj[pos_logic][pos].keys() and dep_obj[pos_logic][pos]["else"] == True:
all_related = _add_related(G.nodes[sibling_idx]['word'], dep, all_related,
A.index_lookup[G.nodes[sibling_idx]['word']], connector=connector)
return all_related
def _parse_patterns(unit_idx, measurement_format, patterns_file):
""" Loads dependency patterns JSON file and uses "_check_criteria" to look for words related to a measurement (connected via unit token)
Args:
unit_idx (list): index or indices of measurement unit token(s)
measurement_format (str): indicates form of measurement value + unit (attached: 10m, space between: 10 m, hyphenated: 10-m)
Returns:
list: related words and metadata
"""
all_related = []
for edge in G.edges(data=True):
for idx in unit_idx:
sibling_idx = _get_connected(edge, idx)
if sibling_idx:
with open(os.path.join(basedir, patterns_file), "r") as tree:
tree = json.load(tree)
for dep in tree["dep"].keys():
if tree["dep"][dep]["enhanced"] == True:
for inner_dep in tree["dep"][dep].keys():
if isinstance(tree["dep"][dep][inner_dep], dict) and measurement_format in \
tree["dep"][dep][inner_dep]["measurement_types"]:
full_dep = dep + ":" + inner_dep
full_dep_obj = tree["dep"][dep][inner_dep]
all_related = _check_criteria(full_dep, full_dep_obj, all_related, edge,
sibling_idx)
elif measurement_format in tree["dep"][dep]["measurement_types"]:
all_related = _check | """If an edge connects to a node (word), return the index of the node
Args:
edge (tuple): Contains token indices of two connected words and the dependency type between them - e.g. ('11', '14', {'dep': 'nmod:at'})
idx (int): Token index of word
Returns:
str or None: str if connected word is found in provided edge, None if not
"""
if str(edge[0]) == str(idx) and A.lookup[int(edge[1])]["word"] != Num:
return edge[1]
elif str(edge[1]) == str(idx) and A.lookup[int(edge[0])]["word"] != Num:
return edge[0] | identifier_body |
|
Measurements.py | simplified["related"] = {}
if "quantified" in match:
if simplified["unit"] == "":
simplified["unit"] = match["quantified"]["normalizedName"]
simplified["quantified"][match["quantified"]["normalizedName"]] = []
if "descriptors" in match["quantified"]:
match["quantified"]["descriptors"].sort(key=lambda x: int(x["tokenIndex"]), reverse=False)
for x in match["quantified"]["descriptors"]:
simplified["quantified"][match["quantified"]["normalizedName"]].append(x["rawName"])
if match["related"]:
for r in match["related"]:
simplified["related"][r["rawName"]] = []
if "descriptors" in r:
r["descriptors"].sort(key=lambda x: int(x["tokenIndex"]), reverse=False)
for z in r["descriptors"]:
simplified["related"][r["rawName"]].append(z["rawName"])
return simplified
def _reconstruct_sent(parsed_sentence):
"""Reconstruct sentence from CoreNLP tokens - raw sentence text isn't retained by CoreNLP after sentence splitting and processing
Args:
parsed_sentence (dict): Object containing CoreNLP output
Returns:
str: original sentence
"""
sent = ""
for x in range(0, len(parsed_sentence["tokens"])):
sent += parsed_sentence["tokens"][x]['originalText']
if x + 1 != len(parsed_sentence["tokens"]):
# Use character indices from tokens to ensure correct spacing when reconstructing
num_spaces = parsed_sentence["tokens"][x + 1]["characterOffsetBegin"] - parsed_sentence["tokens"][x][
"characterOffsetEnd"]
for y in range(0, num_spaces):
sent += " "
return sent
def _sorted_dictionary(orig_dict, sort_list):
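"""Return an OrderedDict containing the keys from sort_list that exist in orig_dict, in sort_list order."""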
od = OrderedDict()
for item in sort_list:
if item in orig_dict:
od[item] = orig_dict[item]
return od
#########################################
# Top-Level function
#########################################
def extract(content, corenlp_endpoint, grobid_endpoint, dependency_patterns_file, output_file=None, show_graph=False,
pretty=False, simplify=False):
""" Top-level user interface to parsing measurements and related words
Args:
content (str): sentence or paragraph to be parsed (shouldn't be much larger)
corenlp_endpoint (str): host + port of CoreNLP service (e.g. "http://localhost:9000")
grobid_endpoint (str): host + port of grobid service (e.g. "http://localhost:8080")
dependency_patterns (str): filepath to dependency patterns JSON file
output_file (optional: str): file to write output to
show_graph (bool): Will show network visualization of sentence dependencies if True
pretty (bool): JSON output will be pretty printed if True, else one JSON doc per line (JSONL)
simplify (bool): If True provides bare bones output with only extractions and not metadata about indices, types, etc.
Returns:
List of objects: containing parsed measurement info
(optionally write to file)
"""
all_extractions = []
out = None
if output_file:
out = codecs.open(output_file, "a", encoding="utf-8")
if len(content) < 5:
return None
nlp = StanfordCoreNLP(corenlp_endpoint)
output = nlp.annotate(content, properties={'outputFormat': 'json', 'timeout': '9999'})
if isinstance(output, str): # str supports both python 2 and 3
output = json.loads(output.encode("latin-1"), strict=False)
if "sentences" in output and isinstance(output["sentences"], list):
for i in range(0, len(output["sentences"])):
s_str = _reconstruct_sent(output["sentences"][i])
# Enhanced dependencies have different key names in JSON depending on version of CoreNLP
possible_keys = [
"enhanced-plus-plus-dependencies-annotation",
"enhancedPlusPlusDependencies"
]
dep_key = "collapsed-ccprocessed-dependencies" # default key
if "collapsed-ccprocessed-dependencies" not in output["sentences"][i]:
for k in possible_keys:
if k in output["sentences"][i]:
dep_key = k
global A
A = Annotations(output["sentences"][i]["tokens"], output["sentences"][i][dep_key])
if A.check_output(output["sentences"][i], stats) is True:
stats.total_sentences += 1
G = _build_graph(show=show_graph)
grobid_response = grobid_quantities(s_str, A, grobid_endpoint)
if isinstance(grobid_response, dict) and "measurements" in grobid_response:
for quantity in grobid_response["measurements"]:
A.augment_match(quantity)
stats.total_measurements += len(A.matches)
for idx, match in enumerate(A.matches):
global Num
Num = match["num"]
match["sentence"] = i + 1
match["grobid"]["related"] = _get_related(stats, match, dependency_patterns_file)
# Remove fields used for processing but not to be shown to user
remove = ["adverbs", "num", "unit", "connector", "form", "sentence", "num_idx", "unit_idx",
"measurement_format"]
[match.pop(x, None) for x in remove]
sort_order = ['adverbs', 'type', 'quantity', 'quantityLeast', 'quantityMost', 'quantified',
'related']
match_ordered = _sorted_dictionary(match["grobid"], sort_order)
if simplify:
simplified_sort_order = ['value', 'unit', 'quantified', 'related']
simplified = _simplify_results(match_ordered)
if simplified:
match_ordered = _sorted_dictionary(match["grobid"], simplified_sort_order)
if pretty and not simplify:
if out:
out.write(json.dumps(match_ordered, ensure_ascii=False, indent=4))
if idx != len(A.matches) - 1 and out:
out.write(",\n")
elif out:
out.write(json.dumps(match_ordered, ensure_ascii=False) + "\n")
all_extractions.extend(A.matches)
else:
logging.warning("CoreNLP parsing failed for sentence: %s" % (s_str))
else:
logging.warning("CoreNLP parsing failed for content: %s" % (content))
if out:
out.close()
logging.info("Total sentences parsed: %s" % (str(stats.total_sentences)))
logging.info("Total measurements found: %s" % (str(stats.total_measurements)))
stats.print_summary()
return all_extractions
def grobid_quantities(sentence, a, endpoint):
"""Pass sentence text to the Grobid quantities service at the given endpoint for measurement parsing
Args:
sentence (str): Sentence to be parsed
a (Annotations object): object containing relevant CoreNLP output
Returns:
dict: object containing Grobid output
"""
# $ needs to be escaped when passed via subprocess
sentence = re.sub("\$", "\\$", sentence)
sentence = re.sub("\"", '\\"', sentence)
sentence = re.sub("%", '%25', sentence)
sentence = re.sub("`", "'", sentence)
sentence = re.sub("'", '\\"', sentence)
if endpoint[len(endpoint) - 1:] == "/":
endpoint = endpoint[:len(endpoint) - 1]
response = None
# try:
response = QuantitiesClient(endpoint).process_text(sentence)
if response[0] != 200:
print('No Grobid response for: %s' % sentence)
logging.warning('No Grobid response for: %s' % sentence)
return ""
quantities = response[1]
# Add token index for num, unit, quantified if available
if isinstance(quantities, dict):
if "measurements" in quantities.keys():
for q in quantities["measurements"]:
key = ""
if q["type"] == "value":
key = "quantity"
# if Grobid doesn't parse interval correctly, sometimes only 'QuantityLeast' or 'QuantityMost' is available
if q["type"] == "interval":
if "quantityLeast" in q:
key = "quantityLeast"
elif "QuantityMost" in q:
key = "quantityMost"
else:
return {}
if q["type"] == "listc":
return {}
if key == "":
logging.error('Unknown Grobid key resulting from parse of: %s' % sentence)
print("Unknown Grobid key resulting from parse of: %s" % sentence)
# Grobid doesn't pick up negatives
if sentence[sentence.find(q[key]["rawValue"]) - 1] == "-":
q[key]["parsedValue"] = float("-" + str(q[key]["parsedValue"]))
q[key]["rawValue"] = "-" + str(q[key]["rawValue"])
q[key]["offsetStart"] -= 1
if q[key]["offsetStart"] in a.tok_start:
q[key]["tokenIndex"] = a.tok_start[q[key]["offsetStart"]]
else:
print("Not finding token index for Grobid Quantity value in CoreNLP output. Sentence: %s" % sentence)
logging.error( | random_line_split |
||
Measurements.py | ():
connector = None
if isinstance(dep_obj[pos_logic], dict):
for pos in dep_obj[pos_logic].keys():
# Check for allowed part of speech tags in matched dependency patterns
if (pos_logic == "pos_in" and pos in G.nodes[sibling_idx]["pos"]) or (
pos_logic == "pos_equals" and pos == G.nodes[sibling_idx]["pos"]):
pass
elif pos_logic == "pos_not":
if not [False if not_pos == G.nodes[sibling_idx]["pos"] else True for not_pos in
dep_obj.keys()]: continue
else:
continue
# if no additional checks, have a match
if dep_obj[pos_logic][pos] == None or any(
y in dep_obj[pos_logic][pos] for y in [None, "add_sibling"]):
all_related = _add_related(G.nodes[sibling_idx]['word'], dep, all_related,
A.index_lookup[G.nodes[sibling_idx]['word']])
# if additional checks are required, process further
if dep_obj[pos_logic][pos]:
if "get_cousin" in dep_obj[pos_logic][pos]:
related.extend(_get_cousin(sibling_idx, dep_obj[pos_logic][pos]["get_cousin"]))
connector = G.nodes[sibling_idx]['word']
if "special" in dep_obj[pos_logic][pos]:
if dep == "compound" and pos == "NN":
related = [G.nodes[sibling_idx]['word']]
if None in related:
related.remove(None)
# Allows for getting cousin and returning sibling
if "else" in dep_obj[pos_logic][pos].keys() and dep_obj[pos_logic][pos]["else"] == "always":
all_related = _add_related(G.nodes[sibling_idx]['word'], dep, all_related,
A.index_lookup[G.nodes[sibling_idx]['word']], connector=connector)
if len(related) > 0 and isinstance(related, list):
for x in related:
if x != None:
all_related = _add_related(x, dep, all_related, A.index_lookup[x],
connector=connector)
elif "else" in dep_obj[pos_logic][pos].keys() and dep_obj[pos_logic][pos]["else"] == True:
all_related = _add_related(G.nodes[sibling_idx]['word'], dep, all_related,
A.index_lookup[G.nodes[sibling_idx]['word']], connector=connector)
return all_related
def _parse_patterns(unit_idx, measurement_format, patterns_file):
""" Loads dependency patterns JSON file and uses "_check_criteria" to look for words related to a measurement (connected via unit token)
Args:
unit_idx (list): index or indices of measurement unit token(s)
measurement_format (str): indicates form of measurement value + unit (attached: 10m, space between: 10 m, hyphenated: 10-m)
Returns:
list: related words and metadata
"""
all_related = []
for edge in G.edges(data=True):
for idx in unit_idx:
sibling_idx = _get_connected(edge, idx)
if sibling_idx:
with open(os.path.join(basedir, patterns_file), "r") as tree:
tree = json.load(tree)
for dep in tree["dep"].keys():
if tree["dep"][dep]["enhanced"] == True:
for inner_dep in tree["dep"][dep].keys():
if isinstance(tree["dep"][dep][inner_dep], dict) and measurement_format in \
tree["dep"][dep][inner_dep]["measurement_types"]:
full_dep = dep + ":" + inner_dep
full_dep_obj = tree["dep"][dep][inner_dep]
all_related = _check_criteria(full_dep, full_dep_obj, all_related, edge,
sibling_idx)
elif measurement_format in tree["dep"][dep]["measurement_types"]:
all_related = _check_criteria(dep, tree["dep"][dep], all_related, edge, sibling_idx)
for x in range(0, len(tree["word"]["or"])):
if G.nodes[sibling_idx]["word"] == tree["word"]["or"][x]:
related = _get_cousin(sibling_idx, ["nsubj"])
for r in related:
all_related = _add_related(r, "operator", all_related, A.index_lookup[r])
all_related = _add_descriptors(all_related)
return all_related
def _get_related(stats, match, patterns_file):
""" Calls _parse_patterns() to get words related to a measurement and provides de-duplication between related words and grobid response
Args:
stats (Stats object): Global object used to track parsing behaviors
match (dict): information on measurements and units extracted by Grobid
Returns:
list: related words and metadata
"""
all_related = None
measurement_formats = ["space_between", "attached", "hyphenated"]
all_related = _parse_patterns(match["unit_idx"], match["measurement_format"], patterns_file)
if all_related == None:
all_related = _parse_patterns(match["unit_idx"], ["uncertain"], patterns_file)
# get words like approximately
num_adverbs = _parse_patterns([match["num_idx"]], match["measurement_format"], patterns_file)
unit_adverbs = _parse_patterns([match["unit_idx"]], match["measurement_format"], patterns_file)
adverbs = num_adverbs + unit_adverbs
for_removal = []
for a in adverbs:
if a["relationForm"] != "advmod":
for_removal.append(a)
else:
[a.pop(key, None) for key in ["descriptors", "connector"]] # not relevant for adverbs
[adverbs.remove(a) for a in for_removal]
if adverbs:
match["grobid"]["adverbs"] = adverbs
# Check to make sure related isn't already a number, unit, or quantified thing identified by Grobid
potential_keys = ["quantity", "quantityLeast", "quantityMost", "quantified"]
if all_related:
for key in potential_keys:
for related in all_related:
if key in match["grobid"]:
num, unit, quantified = "", "", ""
if "rawValue" in match["grobid"][key]: num = match["grobid"][key]["rawValue"]
if "rawUnit" in match["grobid"][key]: unit = match["grobid"][key]["rawUnit"]["name"]
if "normalizedName" in match["grobid"][key]: quantified = match["grobid"][key]["normalizedName"]
if related["rawName"] in [num, unit, quantified] or related["rawName"] == num + unit or (
quantified in related["rawName"] and not quantified == ""):
all_related.remove(related)
if related["rawName"] == unit:
for k in related.keys():
if not k in match["grobid"][key]["rawUnit"]:
match["grobid"][key]["rawUnit"][k] = related[k]
elif related["rawName"] == quantified:
for k in related.keys():
if not k in match["grobid"][key]:
match["grobid"][key][k] = related[k]
return all_related
def _simplify_results(match):
"""WORK IN PROGRESS: Prune metadata from extracted measurements and related words for more readable output
Args:
match (dict): Object containing all metadata about extraction types, locations, and relationships within the sentence
Returns:
dict: the extracted numeric value or range, unit(s), quantified words identified by Grobid, and related words
"""
keys = []
simplified = {}
simplified["value"] = []
if match["type"] == "value":
keys = ["quantity"]
elif match["type"] == "interval":
keys = ["quantityLeast", "quantityMost"]
for key in keys:
if key in match:
if "parsedValue" in match[key]:
simplified["value"].append(match[key]["parsedValue"])
elif "rawValue" in match[key]:
simplified["value"].append(match[key]["rawValue"])
else:
return None
simplified["unit"] = match[key]["rawUnit"]["name"] if "rawUnit" in match[key] else ""
if len(simplified["value"]) == 1:
simplified["value"] = simplified["value"][0]
simplified["quantified"] = {}
simplified["related"] = {}
if "quantified" in match:
if simplified["unit"] == "":
simplified["unit"] = match["quantified"]["normalizedName"]
simplified["quantified"][match["quantified"]["normalizedName"]] = []
if "descriptors" in match["quantified"]:
match["quantified"]["descriptors"].sort(key=lambda x: int(x["tokenIndex"]), reverse=False)
for x in match["quantified"]["descriptors"]:
simplified["quantified"][match["quantified"]["normalizedName"]].append(x["rawName"])
if match["related"]:
for r in match["related"]:
simplified["related"][r["rawName"]] = []
if "descriptors" in r:
| r["descriptors"].sort(key=lambda x: int(x["tokenIndex"]), reverse=False)
for z in r["descriptors"]:
simplified["related"][r["rawName"]].append(z["rawName"]) | conditional_block |
|
nsis.rs | (settings: &Settings, updater: bool) -> crate::Result<Vec<PathBuf>> {
let tauri_tools_path = dirs_next::cache_dir().unwrap().join("tauri");
let nsis_toolset_path = tauri_tools_path.join("NSIS");
if !nsis_toolset_path.exists() {
get_and_extract_nsis(&nsis_toolset_path, &tauri_tools_path)?;
} else if NSIS_REQUIRED_FILES
.iter()
.any(|p| !nsis_toolset_path.join(p).exists())
{
warn!("NSIS directory is missing some files. Recreating it.");
std::fs::remove_dir_all(&nsis_toolset_path)?;
get_and_extract_nsis(&nsis_toolset_path, &tauri_tools_path)?;
}
build_nsis_app_installer(settings, &nsis_toolset_path, &tauri_tools_path, updater)
}
// Gets NSIS and verifies the download via Sha1
fn get_and_extract_nsis(nsis_toolset_path: &Path, _tauri_tools_path: &Path) -> crate::Result<()> {
info!("Verifying NSIS package");
#[cfg(target_os = "windows")]
{
let data = download_and_verify(NSIS_URL, NSIS_SHA1, HashAlgorithm::Sha1)?;
info!("extracting NSIS");
extract_zip(&data, _tauri_tools_path)?;
rename(_tauri_tools_path.join("nsis-3.08"), nsis_toolset_path)?;
}
let nsis_plugins = nsis_toolset_path.join("Plugins");
let data = download(NSIS_APPLICATIONID_URL)?;
info!("extracting NSIS ApplicationID plugin");
extract_zip(&data, &nsis_plugins)?;
create_dir_all(nsis_plugins.join("x86-unicode"))?;
copy(
nsis_plugins
.join("ReleaseUnicode")
.join("ApplicationID.dll"),
nsis_plugins.join("x86-unicode").join("ApplicationID.dll"),
)?;
let data = download_and_verify(NSIS_TAURI_UTILS, NSIS_TAURI_UTILS_SHA1, HashAlgorithm::Sha1)?;
write(
nsis_plugins
.join("x86-unicode")
.join("nsis_tauri_utils.dll"),
data,
)?;
Ok(())
}
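/// Expands the app version into a four-part numeric string, using the numeric build metadata as the fourth component or `.0` when none is present.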
fn add_build_number_if_needed(version_str: &str) -> anyhow::Result<String> {
let version = semver::Version::parse(version_str).context("invalid app version")?;
if !version.build.is_empty() {
let build = version.build.parse::<u64>();
if build.is_ok() {
return Ok(format!(
"{}.{}.{}.{}",
version.major, version.minor, version.patch, version.build
));
} else {
anyhow::bail!("optional build metadata in app version must be numeric-only");
}
}
Ok(format!(
"{}.{}.{}.0",
version.major, version.minor, version.patch,
))
}
fn build_nsis_app_installer(
settings: &Settings,
_nsis_toolset_path: &Path,
tauri_tools_path: &Path,
updater: bool,
) -> crate::Result<Vec<PathBuf>> {
let arch = match settings.binary_arch() {
"x86_64" => "x64",
"x86" => "x86",
"aarch64" => "arm64",
target => {
return Err(crate::Error::ArchError(format!(
"unsupported target: {}",
target
)))
}
};
info!("Target: {}", arch);
#[cfg(target_os = "windows")]
{
let main_binary = settings
.binaries()
.iter()
.find(|bin| bin.main())
.ok_or_else(|| anyhow::anyhow!("Failed to get main binary"))?;
let app_exe_source = settings.binary_path(main_binary);
try_sign(&app_exe_source, settings)?;
}
#[cfg(not(target_os = "windows"))]
info!("Code signing is currently only supported on Windows hosts, skipping...");
let output_path = settings.project_out_directory().join("nsis").join(arch);
if output_path.exists() {
remove_dir_all(&output_path)?;
}
create_dir_all(&output_path)?;
let mut data = BTreeMap::new();
let bundle_id = settings.bundle_identifier();
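// default the manufacturer to the second segment of the bundle identifier (or the whole identifier) when no publisher is configured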
let manufacturer = settings
.publisher()
.unwrap_or_else(|| bundle_id.split('.').nth(1).unwrap_or(bundle_id));
#[cfg(not(target_os = "windows"))]
{
let mut dir = dirs_next::cache_dir().unwrap();
dir.extend(["tauri", "NSIS", "Plugins", "x86-unicode"]);
data.insert("additional_plugins_path", to_json(dir));
}
data.insert("arch", to_json(arch));
data.insert("bundle_id", to_json(bundle_id));
data.insert("manufacturer", to_json(manufacturer));
data.insert("product_name", to_json(settings.product_name()));
data.insert("short_description", to_json(settings.short_description()));
data.insert("copyright", to_json(settings.copyright_string()));
let version = settings.version_string();
data.insert("version", to_json(version));
data.insert(
"version_with_build",
to_json(add_build_number_if_needed(version)?),
);
data.insert(
"allow_downgrades",
to_json(settings.windows().allow_downgrades),
);
let mut install_mode = NSISInstallerMode::CurrentUser;
let mut languages = vec!["English".into()];
let mut custom_template_path = None;
let mut custom_language_files = None;
if let Some(nsis) = &settings.windows().nsis {
custom_template_path = nsis.template.clone();
custom_language_files = nsis.custom_language_files.clone();
install_mode = nsis.install_mode;
if let Some(langs) = &nsis.languages {
languages.clear();
languages.extend_from_slice(langs);
}
if let Some(license) = &nsis.license {
data.insert("license", to_json(dunce::canonicalize(license)?));
}
if let Some(installer_icon) = &nsis.installer_icon {
data.insert(
"installer_icon",
to_json(dunce::canonicalize(installer_icon)?),
);
}
if let Some(header_image) = &nsis.header_image {
data.insert("header_image", to_json(dunce::canonicalize(header_image)?));
}
if let Some(sidebar_image) = &nsis.sidebar_image {
data.insert(
"sidebar_image",
to_json(dunce::canonicalize(sidebar_image)?),
);
}
data.insert(
"display_language_selector",
to_json(nsis.display_language_selector && languages.len() > 1),
);
}
data.insert(
"install_mode",
to_json(match install_mode {
NSISInstallerMode::CurrentUser => "currentUser",
NSISInstallerMode::PerMachine => "perMachine",
NSISInstallerMode::Both => "both",
}),
);
let mut languages_data = Vec::new();
for lang in &languages {
if let Some(data) = get_lang_data(lang, custom_language_files.as_ref())? {
languages_data.push(data);
} else {
log::warn!("Custom tauri messages for {lang} are not translated.\nIf it is a valid language listed on <https://github.com/kichik/nsis/tree/9465c08046f00ccb6eda985abbdbf52c275c6c4d/Contrib/Language%20files>, please open a Tauri feature request\n or you can provide a custom language file for it in `tauri.conf.json > tauri > bundle > windows > nsis > custom_language_files`");
}
}
data.insert("languages", to_json(languages.clone()));
data.insert(
"language_files",
to_json(
languages_data
.iter()
.map(|d| d.0.clone())
.collect::<Vec<_>>(),
),
);
let main_binary = settings
.binaries()
.iter()
.find(|bin| bin.main())
.ok_or_else(|| anyhow::anyhow!("Failed to get main binary"))?;
data.insert(
"main_binary_name",
to_json(main_binary.name().replace(".exe", "")),
);
data.insert(
"main_binary_path",
to_json(settings.binary_path(main_binary).with_extension("exe")),
);
let out_file = "nsis-output.exe";
data.insert("out_file", to_json(out_file));
let resources = generate_resource_data(settings)?;
data.insert("resources", to_json(resources));
let binaries = generate_binaries_data(settings)?;
data.insert("binaries", to_json(binaries));
if let Some(file_associations) = &settings.file_associations() {
data.insert("file_associations", to_json(file_associations));
}
let silent_webview2_install = if let WebviewInstallMode::DownloadBootstrapper { silent }
| WebviewInstallMode::EmbedBootstrapper { silent }
| WebviewInstallMode::OfflineInstaller { silent } =
settings.windows().webview_install_mode
{
silent
} else {
true
};
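// updater packages always download the WebView2 bootstrapper; full installers follow the configured webview install mode, a fixed runtime path, or the WiX skip flag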
let webview2_install_mode = if updater {
WebviewInstallMode::DownloadBootstrapper {
silent | bundle_project | identifier_name |
|
nsis.rs | rename(_tauri_tools_path.join("nsis-3.08"), nsis_toolset_path)?;
}
let nsis_plugins = nsis_toolset_path.join("Plugins");
let data = download(NSIS_APPLICATIONID_URL)?;
info!("extracting NSIS ApplicationID plugin");
extract_zip(&data, &nsis_plugins)?;
create_dir_all(nsis_plugins.join("x86-unicode"))?;
copy(
nsis_plugins
.join("ReleaseUnicode")
.join("ApplicationID.dll"),
nsis_plugins.join("x86-unicode").join("ApplicationID.dll"),
)?;
let data = download_and_verify(NSIS_TAURI_UTILS, NSIS_TAURI_UTILS_SHA1, HashAlgorithm::Sha1)?;
write(
nsis_plugins
.join("x86-unicode")
.join("nsis_tauri_utils.dll"),
data,
)?;
Ok(())
}
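/// Expands the app version into a four-part numeric string, using the numeric build metadata as the fourth component or `.0` when none is present.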
fn add_build_number_if_needed(version_str: &str) -> anyhow::Result<String> {
let version = semver::Version::parse(version_str).context("invalid app version")?;
if !version.build.is_empty() {
let build = version.build.parse::<u64>();
if build.is_ok() {
return Ok(format!(
"{}.{}.{}.{}",
version.major, version.minor, version.patch, version.build
));
} else {
anyhow::bail!("optional build metadata in app version must be numeric-only");
}
}
Ok(format!(
"{}.{}.{}.0",
version.major, version.minor, version.patch,
))
}
fn build_nsis_app_installer(
settings: &Settings,
_nsis_toolset_path: &Path,
tauri_tools_path: &Path,
updater: bool,
) -> crate::Result<Vec<PathBuf>> {
let arch = match settings.binary_arch() {
"x86_64" => "x64",
"x86" => "x86",
"aarch64" => "arm64",
target => {
return Err(crate::Error::ArchError(format!(
"unsupported target: {}",
target
)))
}
};
info!("Target: {}", arch);
#[cfg(target_os = "windows")]
{
let main_binary = settings
.binaries()
.iter()
.find(|bin| bin.main())
.ok_or_else(|| anyhow::anyhow!("Failed to get main binary"))?;
let app_exe_source = settings.binary_path(main_binary);
try_sign(&app_exe_source, settings)?;
}
#[cfg(not(target_os = "windows"))]
info!("Code signing is currently only supported on Windows hosts, skipping...");
let output_path = settings.project_out_directory().join("nsis").join(arch);
if output_path.exists() {
remove_dir_all(&output_path)?;
}
create_dir_all(&output_path)?;
let mut data = BTreeMap::new();
let bundle_id = settings.bundle_identifier();
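// default the manufacturer to the second segment of the bundle identifier (or the whole identifier) when no publisher is configured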
let manufacturer = settings
.publisher()
.unwrap_or_else(|| bundle_id.split('.').nth(1).unwrap_or(bundle_id));
#[cfg(not(target_os = "windows"))]
{
let mut dir = dirs_next::cache_dir().unwrap();
dir.extend(["tauri", "NSIS", "Plugins", "x86-unicode"]);
data.insert("additional_plugins_path", to_json(dir));
}
data.insert("arch", to_json(arch));
data.insert("bundle_id", to_json(bundle_id));
data.insert("manufacturer", to_json(manufacturer));
data.insert("product_name", to_json(settings.product_name()));
data.insert("short_description", to_json(settings.short_description()));
data.insert("copyright", to_json(settings.copyright_string()));
let version = settings.version_string();
data.insert("version", to_json(version));
data.insert(
"version_with_build",
to_json(add_build_number_if_needed(version)?),
);
data.insert(
"allow_downgrades",
to_json(settings.windows().allow_downgrades),
);
let mut install_mode = NSISInstallerMode::CurrentUser;
let mut languages = vec!["English".into()];
let mut custom_template_path = None;
let mut custom_language_files = None;
if let Some(nsis) = &settings.windows().nsis {
custom_template_path = nsis.template.clone();
custom_language_files = nsis.custom_language_files.clone();
install_mode = nsis.install_mode;
if let Some(langs) = &nsis.languages {
languages.clear();
languages.extend_from_slice(langs);
}
if let Some(license) = &nsis.license {
data.insert("license", to_json(dunce::canonicalize(license)?));
}
if let Some(installer_icon) = &nsis.installer_icon {
data.insert(
"installer_icon",
to_json(dunce::canonicalize(installer_icon)?),
);
}
if let Some(header_image) = &nsis.header_image {
data.insert("header_image", to_json(dunce::canonicalize(header_image)?));
}
if let Some(sidebar_image) = &nsis.sidebar_image {
data.insert(
"sidebar_image",
to_json(dunce::canonicalize(sidebar_image)?),
);
}
data.insert(
"display_language_selector",
to_json(nsis.display_language_selector && languages.len() > 1),
);
}
data.insert(
"install_mode",
to_json(match install_mode {
NSISInstallerMode::CurrentUser => "currentUser",
NSISInstallerMode::PerMachine => "perMachine",
NSISInstallerMode::Both => "both",
}),
);
let mut languages_data = Vec::new();
for lang in &languages {
if let Some(data) = get_lang_data(lang, custom_language_files.as_ref())? {
languages_data.push(data);
} else {
log::warn!("Custom tauri messages for {lang} are not translated.\nIf it is a valid language listed on <https://github.com/kichik/nsis/tree/9465c08046f00ccb6eda985abbdbf52c275c6c4d/Contrib/Language%20files>, please open a Tauri feature request\n or you can provide a custom language file for it in `tauri.conf.json > tauri > bundle > windows > nsis > custom_language_files`");
}
}
data.insert("languages", to_json(languages.clone()));
data.insert(
"language_files",
to_json(
languages_data
.iter()
.map(|d| d.0.clone())
.collect::<Vec<_>>(),
),
);
let main_binary = settings
.binaries()
.iter()
.find(|bin| bin.main())
.ok_or_else(|| anyhow::anyhow!("Failed to get main binary"))?;
data.insert(
"main_binary_name",
to_json(main_binary.name().replace(".exe", "")),
);
data.insert(
"main_binary_path",
to_json(settings.binary_path(main_binary).with_extension("exe")),
);
let out_file = "nsis-output.exe";
data.insert("out_file", to_json(out_file));
let resources = generate_resource_data(settings)?;
data.insert("resources", to_json(resources));
let binaries = generate_binaries_data(settings)?;
data.insert("binaries", to_json(binaries));
if let Some(file_associations) = &settings.file_associations() {
data.insert("file_associations", to_json(file_associations));
}
let silent_webview2_install = if let WebviewInstallMode::DownloadBootstrapper { silent }
| WebviewInstallMode::EmbedBootstrapper { silent }
| WebviewInstallMode::OfflineInstaller { silent } =
settings.windows().webview_install_mode
{
silent
} else {
true
};
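// updater packages always download the WebView2 bootstrapper; full installers follow the configured webview install mode, a fixed runtime path, or the WiX skip flag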
let webview2_install_mode = if updater {
WebviewInstallMode::DownloadBootstrapper {
silent: silent_webview2_install,
}
} else {
let mut webview_install_mode = settings.windows().webview_install_mode.clone();
if let Some(fixed_runtime_path) = settings.windows().webview_fixed_runtime_path.clone() {
webview_install_mode = WebviewInstallMode::FixedRuntime {
path: fixed_runtime_path,
};
} else if let Some(wix) = &settings.windows().wix {
if wix.skip_webview_install {
webview_install_mode = WebviewInstallMode::Skip;
}
}
webview_install_mode
};
let webview2_installer_args = to_json(if silent_webview2_install {
"/silent"
} else {
""
});
data.insert("webview2_installer_args", to_json(webview2_installer_args));
data.insert(
"install_webview2_mode",
to_json(match webview2_install_mode {
WebviewInstallMode::DownloadBootstrapper { silent: _ } => "downloadBootstrapper",
WebviewInstallMode::EmbedBootstrapper { silent: _ } => "embedBootstrapper",
WebviewInstallMode::OfflineInstaller { silent: _ } => "offlineInstaller",
_ => "",
}),
);
match webview2_install_mode {
WebviewInstallMode | info!("extracting NSIS");
extract_zip(&data, _tauri_tools_path)?; | random_line_split |
|
nsis.rs | ER_GUID
};
let offline_installer_path = tauri_tools_path
.join("Webview2OfflineInstaller")
.join(guid)
.join(arch);
create_dir_all(&offline_installer_path)?;
let webview2_installer_path =
offline_installer_path.join("MicrosoftEdgeWebView2RuntimeInstaller.exe");
if !webview2_installer_path.exists() {
std::fs::write(
&webview2_installer_path,
download(
&format!("https://msedge.sf.dl.delivery.mp.microsoft.com/filestreamingservice/files/{}/MicrosoftEdgeWebView2RuntimeInstaller{}.exe",
guid,
arch.to_uppercase(),
),
)?,
)?;
}
data.insert("webview2_installer_path", to_json(webview2_installer_path));
}
_ => {}
}
let mut handlebars = Handlebars::new();
handlebars.register_helper("or", Box::new(handlebars_or));
handlebars.register_helper("association-description", Box::new(association_description));
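// escape characters that are special in NSIS strings ($, quotes, backticks, newline, tab, carriage return) before they are written into the installer script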
handlebars.register_escape_fn(|s| {
let mut output = String::new();
for c in s.chars() {
match c {
'\"' => output.push_str("$\\\""),
'$' => output.push_str("$$"),
'`' => output.push_str("$\\`"),
'\n' => output.push_str("$\\n"),
'\t' => output.push_str("$\\t"),
'\r' => output.push_str("$\\r"),
_ => output.push(c),
}
}
output
});
if let Some(path) = custom_template_path {
handlebars
.register_template_string("installer.nsi", std::fs::read_to_string(path)?)
.map_err(|e| e.to_string())
.expect("Failed to setup custom handlebar template");
} else {
handlebars
.register_template_string("installer.nsi", include_str!("./templates/installer.nsi"))
.map_err(|e| e.to_string())
.expect("Failed to setup handlebar template");
}
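  // Write the bundled NSIS helper and the rendered installer script as UTF-16 LE with a BOM.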
write_ut16_le_with_bom(
&output_path.join("FileAssociation.nsh"),
include_str!("./templates/FileAssociation.nsh"),
)?;
let installer_nsi_path = output_path.join("installer.nsi");
write_ut16_le_with_bom(
&installer_nsi_path,
handlebars.render("installer.nsi", &data)?.as_str(),
)?;
for (lang, data) in languages_data.iter() {
if let Some(content) = data {
write_ut16_le_with_bom(output_path.join(lang).with_extension("nsh"), content)?;
}
}
let package_base_name = format!(
"{}_{}_{}-setup",
main_binary.name().replace(".exe", ""),
settings.version_string(),
arch,
);
let nsis_output_path = output_path.join(out_file);
let nsis_installer_path = settings.project_out_directory().to_path_buf().join(format!(
"bundle/{}/{}.exe",
if updater {
NSIS_UPDATER_OUTPUT_FOLDER_NAME
} else {
NSIS_OUTPUT_FOLDER_NAME
},
package_base_name
));
create_dir_all(nsis_installer_path.parent().unwrap())?;
info!(action = "Running"; "makensis.exe to produce {}", display_path(&nsis_installer_path));
#[cfg(target_os = "windows")]
let mut nsis_cmd = Command::new(_nsis_toolset_path.join("makensis.exe"));
#[cfg(not(target_os = "windows"))]
let mut nsis_cmd = Command::new("makensis");
nsis_cmd
.arg(match settings.log_level() {
log::Level::Error => "-V1",
log::Level::Warn => "-V2",
log::Level::Info => "-V3",
_ => "-V4",
})
.arg(installer_nsi_path)
.current_dir(output_path)
.piped()
.context("error running makensis.exe")?;
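  // Move the freshly built installer out of the NSIS working directory into the bundle output path.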
rename(nsis_output_path, &nsis_installer_path)?;
// Code signing is currently only supported on Windows hosts
#[cfg(target_os = "windows")]
try_sign(&nsis_installer_path, settings)?;
Ok(vec![nsis_installer_path])
}
fn handlebars_or(
h: &handlebars::Helper<'_, '_>,
_: &Handlebars<'_>,
_: &handlebars::Context,
_: &mut handlebars::RenderContext<'_, '_>,
out: &mut dyn handlebars::Output,
) -> handlebars::HelperResult {
let param1 = h.param(0).unwrap().render();
let param2 = h.param(1).unwrap();
out.write(&if param1.is_empty() {
param2.render()
} else {
param1
})?;
Ok(())
}
fn association_description(
h: &handlebars::Helper<'_, '_>,
_: &Handlebars<'_>,
_: &handlebars::Context,
_: &mut handlebars::RenderContext<'_, '_>,
out: &mut dyn handlebars::Output,
) -> handlebars::HelperResult {
let description = h.param(0).unwrap().render();
let ext = h.param(1).unwrap();
out.write(&if description.is_empty() {
format!("{} File", ext.render().to_uppercase())
} else {
description
})?;
Ok(())
}
/// BTreeMap<OriginalPath, (ParentOfTargetPath, TargetPath)>
type ResourcesMap = BTreeMap<PathBuf, (String, PathBuf)>;
fn generate_resource_data(settings: &Settings) -> crate::Result<ResourcesMap> {
let mut resources = ResourcesMap::new();
let cwd = std::env::current_dir()?;
let mut added_resources = Vec::new();
for src in settings.resource_files() {
let src = src?;
let resource_path = dunce::canonicalize(cwd.join(&src))?;
// In some glob resource paths like `assets/**/*` a file might appear twice
// because the `tauri_utils::resources::ResourcePaths` iterator also reads a directory
// when it finds one. So we must check it before processing the file.
if added_resources.contains(&resource_path) {
continue;
}
added_resources.push(resource_path.clone());
let target_path = resource_relpath(&src);
resources.insert(
resource_path,
(
target_path
.parent()
.map(|p| p.to_string_lossy().to_string())
.unwrap_or_default(),
target_path,
),
);
}
Ok(resources)
}
/// BTreeMap<OriginalPath, TargetFileName>
type BinariesMap = BTreeMap<PathBuf, String>;
fn generate_binaries_data(settings: &Settings) -> crate::Result<BinariesMap> {
let mut binaries = BinariesMap::new();
let cwd = std::env::current_dir()?;
for src in settings.external_binaries() {
let src = src?;
let binary_path = dunce::canonicalize(cwd.join(&src))?;
let dest_filename = src
.file_name()
.expect("failed to extract external binary filename")
.to_string_lossy()
.replace(&format!("-{}", settings.target()), "");
binaries.insert(binary_path, dest_filename);
}
for bin in settings.binaries() {
if !bin.main() {
let bin_path = settings.binary_path(bin);
binaries.insert(
bin_path.clone(),
bin_path
.file_name()
.expect("failed to extract external binary filename")
.to_string_lossy()
.to_string(),
);
}
}
Ok(binaries)
}
fn get_lang_data(
lang: &str,
custom_lang_files: Option<&HashMap<String, PathBuf>>,
) -> crate::Result<Option<(PathBuf, Option<&'static str>)>> | {
if let Some(path) = custom_lang_files.and_then(|h| h.get(lang)) {
return Ok(Some((dunce::canonicalize(path)?, None)));
}
let lang_path = PathBuf::from(format!("{lang}.nsh"));
let lang_content = match lang.to_lowercase().as_str() {
"arabic" => Some(include_str!("./templates/nsis-languages/Arabic.nsh")),
"bulgarian" => Some(include_str!("./templates/nsis-languages/Bulgarian.nsh")),
"dutch" => Some(include_str!("./templates/nsis-languages/Dutch.nsh")),
"english" => Some(include_str!("./templates/nsis-languages/English.nsh")),
"japanese" => Some(include_str!("./templates/nsis-languages/Japanese.nsh")),
"korean" => Some(include_str!("./templates/nsis-languages/Korean.nsh")),
"portuguesebr" => Some(include_str!("./templates/nsis-languages/PortugueseBR.nsh")),
"tradchinese" => Some(include_str!("./templates/nsis-languages/TradChinese.nsh")),
"simpchinese" => Some(include_str!("./templates/nsis-languages/SimpChinese.nsh")),
"french" => Some(include_str!("./templates/nsis-languages/French.nsh")),
"spanish" => Some(include_str!("./templates/nsis-languages/Spanish.nsh")),
"spanishinternational" => Some(include_str!(
"./templates/nsis-languages/SpanishInternational.nsh" | identifier_body |
|
scheduler.go
	// Guarded by "mu".
toBeScheduledClusterOperations chan ClusterOperationInstance
// Guarded by "mu".
state schedulerState
// Guarded by "taskCreatorMu". May be overridden by testing code.
taskCreator taskCreator
taskCreatorMu sync.Mutex
pendingOpsWg *sync.WaitGroup
muOpList sync.Mutex
// Guarded by "muOpList".
// The key of the map is ClusterOperationInstance.ID.
// This map contains a copy of the ClusterOperationInstance which is currently processed.
// The scheduler may update the copy with the latest status.
activeClusterOperations map[string]ClusterOperationInstance
// Guarded by "muOpList".
// The key of the map is ClusterOperationInstance.ID.
finishedClusterOperations map[string]ClusterOperationInstance
}
// NewScheduler creates a new instance.
func NewScheduler() (*Scheduler, error) {
defaultClusterOperations := map[string]bool{
"HorizontalReshardingTask": true,
"VerticalSplitTask": true,
}
s := &Scheduler{
registeredClusterOperations: defaultClusterOperations,
idGenerator: IDGenerator{},
toBeScheduledClusterOperations: make(chan ClusterOperationInstance, 10),
state: stateNotRunning,
taskCreator: defaultTaskCreator,
pendingOpsWg: &sync.WaitGroup{},
activeClusterOperations: make(map[string]ClusterOperationInstance),
finishedClusterOperations: make(map[string]ClusterOperationInstance),
}
return s, nil
}
func (s *Scheduler) registerClusterOperation(clusterOperationName string) {
s.mu.Lock()
defer s.mu.Unlock()
s.registeredClusterOperations[clusterOperationName] = true
}
// Run processes queued cluster operations.
func (s *Scheduler) Run() {
s.mu.Lock()
s.state = stateRunning
s.mu.Unlock()
s.startProcessRequestsLoop()
}
func (s *Scheduler) startProcessRequestsLoop() {
// Use a WaitGroup instead of just a done channel, because we want
// to be able to shut down the scheduler even if Run() was never executed.
s.pendingOpsWg.Add(1)
go s.processRequestsLoop()
}
func (s *Scheduler) processRequestsLoop() {
defer s.pendingOpsWg.Done()
for op := range s.toBeScheduledClusterOperations {
s.processClusterOperation(op)
}
log.Infof("Stopped processing loop for ClusterOperations.")
}
func (s *Scheduler) processClusterOperation(clusterOp ClusterOperationInstance) {
if clusterOp.State == automationpb.ClusterOperationState_CLUSTER_OPERATION_DONE {
log.Infof("ClusterOperation: %v skipping because it is already done. Details: %v", clusterOp.Id, clusterOp)
return
}
log.Infof("ClusterOperation: %v running. Details: %v", clusterOp.Id, clusterOp)
clusterOpLoop:
for i := 0; i < len(clusterOp.SerialTasks); i++ {
taskContainer := clusterOp.SerialTasks[i]
for _, taskProto := range taskContainer.ParallelTasks {
newTaskContainers, output, err := s.runTask(taskProto, clusterOp.Id)
if err != nil {
MarkTaskFailed(taskProto, output, err)
clusterOp.Error = err.Error()
break clusterOpLoop
} else {
MarkTaskSucceeded(taskProto, output)
}
if newTaskContainers != nil {
// Make sure all new tasks do not miss any required parameters.
err := s.validateTaskContainers(newTaskContainers)
if err != nil {
err = vterrors.Wrapf(err, "task: %v (%v/%v) emitted a new task which is not valid. Error: %v", taskProto.Name, clusterOp.Id, taskProto.Id, err)
log.Error(err)
MarkTaskFailed(taskProto, output, err)
clusterOp.Error = err.Error()
break clusterOpLoop
}
clusterOp.InsertTaskContainers(newTaskContainers, i+1)
log.Infof("ClusterOperation: %v %d new task containers added by %v (%v/%v). Updated ClusterOperation: %v",
clusterOp.Id, len(newTaskContainers), taskProto.Name, clusterOp.Id, taskProto.Id, clusterOp)
}
s.Checkpoint(clusterOp)
}
}
clusterOp.State = automationpb.ClusterOperationState_CLUSTER_OPERATION_DONE
log.Infof("ClusterOperation: %v finished. Details: %v", clusterOp.Id, clusterOp)
s.Checkpoint(clusterOp)
// Move operation from active to finished.
s.muOpList.Lock()
defer s.muOpList.Unlock()
	if _, ok := s.activeClusterOperations[clusterOp.Id]; !ok {
		panic("Pending ClusterOperation was not recorded as active, but should have.")
	}
delete(s.activeClusterOperations, clusterOp.Id)
s.finishedClusterOperations[clusterOp.Id] = clusterOp
}
func (s *Scheduler) runTask(taskProto *automationpb.Task, clusterOpID string) ([]*automationpb.TaskContainer, string, error) {
if taskProto.State == automationpb.TaskState_DONE {
// Task is already done (e.g. because we resume from a checkpoint).
if taskProto.Error != "" {
log.Errorf("Task: %v (%v/%v) failed before. Aborting the ClusterOperation. Error: %v Details: %v", taskProto.Name, clusterOpID, taskProto.Id, taskProto.Error, taskProto)
return nil, "", errors.New(taskProto.Error)
}
log.Infof("Task: %v (%v/%v) skipped because it is already done. Full Details: %v", taskProto.Name, clusterOpID, taskProto.Id, taskProto)
return nil, taskProto.Output, nil
}
task, err := s.createTaskInstance(taskProto.Name)
if err != nil {
log.Errorf("Task: %v (%v/%v) could not be instantiated. Error: %v Details: %v", taskProto.Name, clusterOpID, taskProto.Id, err, taskProto)
return nil, "", err
}
taskProto.State = automationpb.TaskState_RUNNING
log.Infof("Task: %v (%v/%v) running. Details: %v", taskProto.Name, clusterOpID, taskProto.Id, taskProto)
newTaskContainers, output, err := task.Run(taskProto.Parameters)
log.Infof("Task: %v (%v/%v) finished. newTaskContainers: %v, output: %v, error: %v", taskProto.Name, clusterOpID, taskProto.Id, newTaskContainers, output, err)
return newTaskContainers, output, err
}
func (s *Scheduler) validateTaskContainers(newTaskContainers []*automationpb.TaskContainer) error {
for _, newTaskContainer := range newTaskContainers {
for _, newTaskProto := range newTaskContainer.ParallelTasks {
err := s.validateTaskSpecification(newTaskProto.Name, newTaskProto.Parameters)
if err != nil {
return fmt.Errorf("error: %v task: %v", err, newTaskProto)
}
}
}
return nil
}
func defaultTaskCreator(taskName string) Task {
switch taskName {
case "HorizontalReshardingTask":
return &HorizontalReshardingTask{}
case "VerticalSplitTask":
return &VerticalSplitTask{}
case "CopySchemaShardTask":
return &CopySchemaShardTask{}
case "MigrateServedFromTask":
return &MigrateServedFromTask{}
case "MigrateServedTypesTask":
return &MigrateServedTypesTask{}
case "RebuildKeyspaceGraph":
return &RebuildKeyspaceGraphTask{}
case "SplitCloneTask":
return &SplitCloneTask{}
case "SplitDiffTask":
return &SplitDiffTask{}
case "VerticalSplitCloneTask":
return &VerticalSplitCloneTask{}
case "VerticalSplitDiffTask":
return &VerticalSplitDiffTask{}
case "WaitForFilteredReplicationTask":
return &WaitForFilteredReplicationTask{}
default:
return nil
}
}
func (s *Scheduler) setTaskCreator(creator taskCreator) {
s.taskCreatorMu.Lock()
defer s.taskCreatorMu.Unlock()
s.taskCreator = creator
}
func (s *Scheduler) validateTaskSpecification(taskName string, parameters map[string]string) error {
taskInstanceForParametersCheck, err := s.createTaskInstance(taskName)
if err != nil {
return err
}
errParameters := validateParameters(taskInstanceForParametersCheck, parameters)
if errParameters != nil {
return errParameters
}
return nil
}
func (s *Scheduler) createTaskInstance(taskName string) (Task, error) {
s.taskCreatorMu.Lock()
taskCreator := s.taskCreator
s.taskCreatorMu.Unlock()
task := taskCreator(taskName)
if task == nil {
return nil, fmt.Errorf("no implementation found for: %v", taskName)
}
return task, nil
}
// validateParameters returns an error if not all required parameters are provided in "parameters".
// Unknown parameters (neither required nor optional) result in an error.
func validateParameters(task Task, parameters map[string]string) error {
validParams := make(map[string]bool)
var missingParams []string
for _, reqParam := range task.RequiredParameters() {
if _, ok := parameters[reqParam]; ok {
validParams[reqParam] = true
} else {
missingParams = append(missingParams, reqParam)
}
}
if len(missingParams) > 0 {
		return fmt.Errorf("required parameters are missing: %v", missingParams)

scheduler.go
	// Guarded by "mu".
toBeScheduledClusterOperations chan ClusterOperationInstance
// Guarded by "mu".
state schedulerState
// Guarded by "taskCreatorMu". May be overridden by testing code.
taskCreator taskCreator
taskCreatorMu sync.Mutex
pendingOpsWg *sync.WaitGroup
muOpList sync.Mutex
// Guarded by "muOpList".
// The key of the map is ClusterOperationInstance.ID.
// This map contains a copy of the ClusterOperationInstance which is currently processed.
// The scheduler may update the copy with the latest status.
activeClusterOperations map[string]ClusterOperationInstance
// Guarded by "muOpList".
// The key of the map is ClusterOperationInstance.ID.
finishedClusterOperations map[string]ClusterOperationInstance
}
// NewScheduler creates a new instance.
func NewScheduler() (*Scheduler, error) {
defaultClusterOperations := map[string]bool{
"HorizontalReshardingTask": true,
"VerticalSplitTask": true,
}
s := &Scheduler{
registeredClusterOperations: defaultClusterOperations,
idGenerator: IDGenerator{},
toBeScheduledClusterOperations: make(chan ClusterOperationInstance, 10),
state: stateNotRunning,
taskCreator: defaultTaskCreator,
pendingOpsWg: &sync.WaitGroup{},
activeClusterOperations: make(map[string]ClusterOperationInstance),
finishedClusterOperations: make(map[string]ClusterOperationInstance),
}
return s, nil
}
func (s *Scheduler) registerClusterOperation(clusterOperationName string) {
s.mu.Lock()
defer s.mu.Unlock()
s.registeredClusterOperations[clusterOperationName] = true
}
// Run processes queued cluster operations.
func (s *Scheduler) Run() {
s.mu.Lock()
s.state = stateRunning
s.mu.Unlock()
s.startProcessRequestsLoop()
}
func (s *Scheduler) startProcessRequestsLoop() {
// Use a WaitGroup instead of just a done channel, because we want
// to be able to shut down the scheduler even if Run() was never executed.
s.pendingOpsWg.Add(1)
go s.processRequestsLoop()
}
func (s *Scheduler) processRequestsLoop() {
defer s.pendingOpsWg.Done()
for op := range s.toBeScheduledClusterOperations {
s.processClusterOperation(op)
}
log.Infof("Stopped processing loop for ClusterOperations.")
}
func (s *Scheduler) processClusterOperation(clusterOp ClusterOperationInstance) {
if clusterOp.State == automationpb.ClusterOperationState_CLUSTER_OPERATION_DONE {
log.Infof("ClusterOperation: %v skipping because it is already done. Details: %v", clusterOp.Id, clusterOp)
return
}
log.Infof("ClusterOperation: %v running. Details: %v", clusterOp.Id, clusterOp)
clusterOpLoop:
for i := 0; i < len(clusterOp.SerialTasks); i++ {
taskContainer := clusterOp.SerialTasks[i]
for _, taskProto := range taskContainer.ParallelTasks {
newTaskContainers, output, err := s.runTask(taskProto, clusterOp.Id)
if err != nil {
MarkTaskFailed(taskProto, output, err)
clusterOp.Error = err.Error()
break clusterOpLoop
} else {
MarkTaskSucceeded(taskProto, output)
}
if newTaskContainers != nil {
// Make sure all new tasks do not miss any required parameters.
err := s.validateTaskContainers(newTaskContainers)
if err != nil {
err = vterrors.Wrapf(err, "task: %v (%v/%v) emitted a new task which is not valid. Error: %v", taskProto.Name, clusterOp.Id, taskProto.Id, err)
log.Error(err)
MarkTaskFailed(taskProto, output, err)
clusterOp.Error = err.Error()
break clusterOpLoop
}
clusterOp.InsertTaskContainers(newTaskContainers, i+1)
log.Infof("ClusterOperation: %v %d new task containers added by %v (%v/%v). Updated ClusterOperation: %v",
clusterOp.Id, len(newTaskContainers), taskProto.Name, clusterOp.Id, taskProto.Id, clusterOp)
}
s.Checkpoint(clusterOp)
}
}
clusterOp.State = automationpb.ClusterOperationState_CLUSTER_OPERATION_DONE
log.Infof("ClusterOperation: %v finished. Details: %v", clusterOp.Id, clusterOp)
s.Checkpoint(clusterOp)
// Move operation from active to finished.
s.muOpList.Lock()
defer s.muOpList.Unlock()
if _, ok := s.activeClusterOperations[clusterOp.Id]; !ok {
panic("Pending ClusterOperation was not recorded as active, but should have.")
}
delete(s.activeClusterOperations, clusterOp.Id)
s.finishedClusterOperations[clusterOp.Id] = clusterOp
}
func (s *Scheduler) runTask(taskProto *automationpb.Task, clusterOpID string) ([]*automationpb.TaskContainer, string, error) {
if taskProto.State == automationpb.TaskState_DONE {
// Task is already done (e.g. because we resume from a checkpoint).
if taskProto.Error != "" {
log.Errorf("Task: %v (%v/%v) failed before. Aborting the ClusterOperation. Error: %v Details: %v", taskProto.Name, clusterOpID, taskProto.Id, taskProto.Error, taskProto)
return nil, "", errors.New(taskProto.Error)
}
log.Infof("Task: %v (%v/%v) skipped because it is already done. Full Details: %v", taskProto.Name, clusterOpID, taskProto.Id, taskProto)
return nil, taskProto.Output, nil
}
task, err := s.createTaskInstance(taskProto.Name)
if err != nil {
log.Errorf("Task: %v (%v/%v) could not be instantiated. Error: %v Details: %v", taskProto.Name, clusterOpID, taskProto.Id, err, taskProto)
return nil, "", err
}
taskProto.State = automationpb.TaskState_RUNNING
log.Infof("Task: %v (%v/%v) running. Details: %v", taskProto.Name, clusterOpID, taskProto.Id, taskProto)
newTaskContainers, output, err := task.Run(taskProto.Parameters)
log.Infof("Task: %v (%v/%v) finished. newTaskContainers: %v, output: %v, error: %v", taskProto.Name, clusterOpID, taskProto.Id, newTaskContainers, output, err)
return newTaskContainers, output, err
}
func (s *Scheduler) validateTaskContainers(newTaskContainers []*automationpb.TaskContainer) error {
for _, newTaskContainer := range newTaskContainers {
for _, newTaskProto := range newTaskContainer.ParallelTasks {
err := s.validateTaskSpecification(newTaskProto.Name, newTaskProto.Parameters)
if err != nil {
return fmt.Errorf("error: %v task: %v", err, newTaskProto)
}
}
}
return nil
}
func defaultTaskCreator(taskName string) Task {
switch taskName {
case "HorizontalReshardingTask":
return &HorizontalReshardingTask{}
case "VerticalSplitTask":
return &VerticalSplitTask{}
case "CopySchemaShardTask":
return &CopySchemaShardTask{}
case "MigrateServedFromTask":
return &MigrateServedFromTask{}
case "MigrateServedTypesTask":
return &MigrateServedTypesTask{}
case "RebuildKeyspaceGraph":
return &RebuildKeyspaceGraphTask{}
case "SplitCloneTask":
return &SplitCloneTask{}
case "SplitDiffTask":
return &SplitDiffTask{}
case "VerticalSplitCloneTask":
return &VerticalSplitCloneTask{}
case "VerticalSplitDiffTask":
return &VerticalSplitDiffTask{}
case "WaitForFilteredReplicationTask":
return &WaitForFilteredReplicationTask{}
default:
return nil
}
}
func (s *Scheduler) setTaskCreator(creator taskCreator) {
s.taskCreatorMu.Lock()
defer s.taskCreatorMu.Unlock()
s.taskCreator = creator
}
func (s *Scheduler) validateTaskSpecification(taskName string, parameters map[string]string) error {
	taskInstanceForParametersCheck, err := s.createTaskInstance(taskName)
	if err != nil {
		return err
	}
	errParameters := validateParameters(taskInstanceForParametersCheck, parameters)
	if errParameters != nil {
		return errParameters
	}
	return nil
}
func (s *Scheduler) createTaskInstance(taskName string) (Task, error) {
s.taskCreatorMu.Lock()
taskCreator := s.taskCreator
s.taskCreatorMu.Unlock()
task := taskCreator(taskName)
if task == nil {
return nil, fmt.Errorf("no implementation found for: %v", taskName)
}
return task, nil
}
// validateParameters returns an error if not all required parameters are provided in "parameters".
// Unknown parameters (neither required nor optional) result in an error.
func validateParameters(task Task, parameters map[string]string) error {
validParams := make(map[string]bool)
var missingParams []string
for _, reqParam := range task.RequiredParameters() {
if _, ok := parameters[reqParam]; ok {
validParams[reqParam] = true
} else {
missingParams = append(missingParams, reqParam)
}
}
if len(missingParams) > 0 {
		return fmt.Errorf("required parameters are missing: %v", missingParams)

scheduler.go
	// Guarded by "mu".
toBeScheduledClusterOperations chan ClusterOperationInstance
// Guarded by "mu".
state schedulerState
// Guarded by "taskCreatorMu". May be overridden by testing code.
taskCreator taskCreator
taskCreatorMu sync.Mutex
pendingOpsWg *sync.WaitGroup
muOpList sync.Mutex
	// Guarded by "muOpList".
	// The key of the map is ClusterOperationInstance.ID.
	// This map contains a copy of the ClusterOperationInstance which is currently processed.
	// The scheduler may update the copy with the latest status.
activeClusterOperations map[string]ClusterOperationInstance
// Guarded by "muOpList".
// The key of the map is ClusterOperationInstance.ID.
finishedClusterOperations map[string]ClusterOperationInstance
}
// NewScheduler creates a new instance.
func NewScheduler() (*Scheduler, error) {
defaultClusterOperations := map[string]bool{
"HorizontalReshardingTask": true,
"VerticalSplitTask": true,
}
s := &Scheduler{
registeredClusterOperations: defaultClusterOperations,
idGenerator: IDGenerator{},
toBeScheduledClusterOperations: make(chan ClusterOperationInstance, 10),
state: stateNotRunning,
taskCreator: defaultTaskCreator,
pendingOpsWg: &sync.WaitGroup{},
activeClusterOperations: make(map[string]ClusterOperationInstance),
finishedClusterOperations: make(map[string]ClusterOperationInstance),
}
return s, nil
}
func (s *Scheduler) registerClusterOperation(clusterOperationName string) {
s.mu.Lock()
defer s.mu.Unlock()
s.registeredClusterOperations[clusterOperationName] = true
}
// Run processes queued cluster operations.
func (s *Scheduler) Run() {
s.mu.Lock()
s.state = stateRunning
s.mu.Unlock()
s.startProcessRequestsLoop()
}
func (s *Scheduler) startProcessRequestsLoop() {
// Use a WaitGroup instead of just a done channel, because we want
// to be able to shut down the scheduler even if Run() was never executed.
s.pendingOpsWg.Add(1)
go s.processRequestsLoop()
}
func (s *Scheduler) processRequestsLoop() {
defer s.pendingOpsWg.Done()
for op := range s.toBeScheduledClusterOperations {
s.processClusterOperation(op)
}
log.Infof("Stopped processing loop for ClusterOperations.")
}
func (s *Scheduler) processClusterOperation(clusterOp ClusterOperationInstance) {
if clusterOp.State == automationpb.ClusterOperationState_CLUSTER_OPERATION_DONE {
log.Infof("ClusterOperation: %v skipping because it is already done. Details: %v", clusterOp.Id, clusterOp)
return
}
log.Infof("ClusterOperation: %v running. Details: %v", clusterOp.Id, clusterOp)
clusterOpLoop:
for i := 0; i < len(clusterOp.SerialTasks); i++ {
taskContainer := clusterOp.SerialTasks[i]
for _, taskProto := range taskContainer.ParallelTasks {
newTaskContainers, output, err := s.runTask(taskProto, clusterOp.Id)
if err != nil {
MarkTaskFailed(taskProto, output, err)
clusterOp.Error = err.Error()
break clusterOpLoop
} else {
MarkTaskSucceeded(taskProto, output)
}
if newTaskContainers != nil {
// Make sure all new tasks do not miss any required parameters.
err := s.validateTaskContainers(newTaskContainers)
if err != nil {
err = vterrors.Wrapf(err, "task: %v (%v/%v) emitted a new task which is not valid. Error: %v", taskProto.Name, clusterOp.Id, taskProto.Id, err)
log.Error(err)
MarkTaskFailed(taskProto, output, err)
clusterOp.Error = err.Error()
break clusterOpLoop
}
clusterOp.InsertTaskContainers(newTaskContainers, i+1)
log.Infof("ClusterOperation: %v %d new task containers added by %v (%v/%v). Updated ClusterOperation: %v",
clusterOp.Id, len(newTaskContainers), taskProto.Name, clusterOp.Id, taskProto.Id, clusterOp)
}
s.Checkpoint(clusterOp)
}
}
clusterOp.State = automationpb.ClusterOperationState_CLUSTER_OPERATION_DONE
log.Infof("ClusterOperation: %v finished. Details: %v", clusterOp.Id, clusterOp)
s.Checkpoint(clusterOp)
// Move operation from active to finished.
s.muOpList.Lock()
defer s.muOpList.Unlock()
if _, ok := s.activeClusterOperations[clusterOp.Id]; !ok {
panic("Pending ClusterOperation was not recorded as active, but should have.")
}
delete(s.activeClusterOperations, clusterOp.Id)
s.finishedClusterOperations[clusterOp.Id] = clusterOp
}
func (s *Scheduler) runTask(taskProto *automationpb.Task, clusterOpID string) ([]*automationpb.TaskContainer, string, error) {
if taskProto.State == automationpb.TaskState_DONE {
// Task is already done (e.g. because we resume from a checkpoint).
if taskProto.Error != "" {
log.Errorf("Task: %v (%v/%v) failed before. Aborting the ClusterOperation. Error: %v Details: %v", taskProto.Name, clusterOpID, taskProto.Id, taskProto.Error, taskProto)
return nil, "", errors.New(taskProto.Error)
}
log.Infof("Task: %v (%v/%v) skipped because it is already done. Full Details: %v", taskProto.Name, clusterOpID, taskProto.Id, taskProto)
return nil, taskProto.Output, nil
}
task, err := s.createTaskInstance(taskProto.Name)
if err != nil {
log.Errorf("Task: %v (%v/%v) could not be instantiated. Error: %v Details: %v", taskProto.Name, clusterOpID, taskProto.Id, err, taskProto)
return nil, "", err
}
taskProto.State = automationpb.TaskState_RUNNING
log.Infof("Task: %v (%v/%v) running. Details: %v", taskProto.Name, clusterOpID, taskProto.Id, taskProto)
newTaskContainers, output, err := task.Run(taskProto.Parameters)
log.Infof("Task: %v (%v/%v) finished. newTaskContainers: %v, output: %v, error: %v", taskProto.Name, clusterOpID, taskProto.Id, newTaskContainers, output, err)
return newTaskContainers, output, err
}
func (s *Scheduler) validateTaskContainers(newTaskContainers []*automationpb.TaskContainer) error {
for _, newTaskContainer := range newTaskContainers {
for _, newTaskProto := range newTaskContainer.ParallelTasks {
err := s.validateTaskSpecification(newTaskProto.Name, newTaskProto.Parameters)
if err != nil {
return fmt.Errorf("error: %v task: %v", err, newTaskProto)
}
}
}
return nil
}
func defaultTaskCreator(taskName string) Task {
switch taskName {
case "HorizontalReshardingTask":
return &HorizontalReshardingTask{}
case "VerticalSplitTask":
return &VerticalSplitTask{}
case "CopySchemaShardTask":
return &CopySchemaShardTask{}
case "MigrateServedFromTask":
return &MigrateServedFromTask{}
case "MigrateServedTypesTask":
return &MigrateServedTypesTask{}
case "RebuildKeyspaceGraph":
return &RebuildKeyspaceGraphTask{}
case "SplitCloneTask":
return &SplitCloneTask{}
case "SplitDiffTask":
return &SplitDiffTask{}
case "VerticalSplitCloneTask":
return &VerticalSplitCloneTask{}
case "VerticalSplitDiffTask":
return &VerticalSplitDiffTask{}
case "WaitForFilteredReplicationTask":
return &WaitForFilteredReplicationTask{}
default:
return nil
}
}
func (s *Scheduler) setTaskCreator(creator taskCreator) {
s.taskCreatorMu.Lock()
defer s.taskCreatorMu.Unlock()
s.taskCreator = creator
}
func (s *Scheduler) validateTaskSpecification(taskName string, parameters map[string]string) error {
taskInstanceForParametersCheck, err := s.createTaskInstance(taskName)
if err != nil {
return err
}
errParameters := validateParameters(taskInstanceForParametersCheck, parameters)
if errParameters != nil {
return errParameters
}
return nil
}
func (s *Scheduler) createTaskInstance(taskName string) (Task, error) {
s.taskCreatorMu.Lock()
taskCreator := s.taskCreator
s.taskCreatorMu.Unlock()
task := taskCreator(taskName)
if task == nil {
return nil, fmt.Errorf("no implementation found for: %v", taskName)
}
return task, nil
}
// validateParameters returns an error if not all required parameters are provided in "parameters".
// Unknown parameters (neither required nor optional) result in an error.
func validateParameters(task Task, parameters map[string]string) error {
validParams := make(map[string]bool)
var missingParams []string
for _, reqParam := range task.RequiredParameters() {
if _, ok := parameters[reqParam]; ok {
validParams[reqParam] = true
} else {
missingParams = append(missingParams, reqParam)
}
}
if len(missingParams) > 0 {
return fmt.Errorf("required parameters are missing: %v", missingParams)
| // The key of the map is ClusterOperationInstance.ID.
// This map contains a copy of the ClusterOperationInstance which is currently processed. | random_line_split |
scheduler.go
			} else {
MarkTaskSucceeded(taskProto, output)
}
if newTaskContainers != nil {
// Make sure all new tasks do not miss any required parameters.
err := s.validateTaskContainers(newTaskContainers)
if err != nil {
err = vterrors.Wrapf(err, "task: %v (%v/%v) emitted a new task which is not valid. Error: %v", taskProto.Name, clusterOp.Id, taskProto.Id, err)
log.Error(err)
MarkTaskFailed(taskProto, output, err)
clusterOp.Error = err.Error()
break clusterOpLoop
}
clusterOp.InsertTaskContainers(newTaskContainers, i+1)
log.Infof("ClusterOperation: %v %d new task containers added by %v (%v/%v). Updated ClusterOperation: %v",
clusterOp.Id, len(newTaskContainers), taskProto.Name, clusterOp.Id, taskProto.Id, clusterOp)
}
s.Checkpoint(clusterOp)
}
}
clusterOp.State = automationpb.ClusterOperationState_CLUSTER_OPERATION_DONE
log.Infof("ClusterOperation: %v finished. Details: %v", clusterOp.Id, clusterOp)
s.Checkpoint(clusterOp)
// Move operation from active to finished.
s.muOpList.Lock()
defer s.muOpList.Unlock()
if _, ok := s.activeClusterOperations[clusterOp.Id]; !ok {
panic("Pending ClusterOperation was not recorded as active, but should have.")
}
delete(s.activeClusterOperations, clusterOp.Id)
s.finishedClusterOperations[clusterOp.Id] = clusterOp
}
func (s *Scheduler) runTask(taskProto *automationpb.Task, clusterOpID string) ([]*automationpb.TaskContainer, string, error) {
if taskProto.State == automationpb.TaskState_DONE {
// Task is already done (e.g. because we resume from a checkpoint).
if taskProto.Error != "" {
log.Errorf("Task: %v (%v/%v) failed before. Aborting the ClusterOperation. Error: %v Details: %v", taskProto.Name, clusterOpID, taskProto.Id, taskProto.Error, taskProto)
return nil, "", errors.New(taskProto.Error)
}
log.Infof("Task: %v (%v/%v) skipped because it is already done. Full Details: %v", taskProto.Name, clusterOpID, taskProto.Id, taskProto)
return nil, taskProto.Output, nil
}
task, err := s.createTaskInstance(taskProto.Name)
if err != nil {
log.Errorf("Task: %v (%v/%v) could not be instantiated. Error: %v Details: %v", taskProto.Name, clusterOpID, taskProto.Id, err, taskProto)
return nil, "", err
}
taskProto.State = automationpb.TaskState_RUNNING
log.Infof("Task: %v (%v/%v) running. Details: %v", taskProto.Name, clusterOpID, taskProto.Id, taskProto)
newTaskContainers, output, err := task.Run(taskProto.Parameters)
log.Infof("Task: %v (%v/%v) finished. newTaskContainers: %v, output: %v, error: %v", taskProto.Name, clusterOpID, taskProto.Id, newTaskContainers, output, err)
return newTaskContainers, output, err
}
func (s *Scheduler) validateTaskContainers(newTaskContainers []*automationpb.TaskContainer) error {
for _, newTaskContainer := range newTaskContainers {
for _, newTaskProto := range newTaskContainer.ParallelTasks {
err := s.validateTaskSpecification(newTaskProto.Name, newTaskProto.Parameters)
if err != nil {
return fmt.Errorf("error: %v task: %v", err, newTaskProto)
}
}
}
return nil
}
func defaultTaskCreator(taskName string) Task {
switch taskName {
case "HorizontalReshardingTask":
return &HorizontalReshardingTask{}
case "VerticalSplitTask":
return &VerticalSplitTask{}
case "CopySchemaShardTask":
return &CopySchemaShardTask{}
case "MigrateServedFromTask":
return &MigrateServedFromTask{}
case "MigrateServedTypesTask":
return &MigrateServedTypesTask{}
case "RebuildKeyspaceGraph":
return &RebuildKeyspaceGraphTask{}
case "SplitCloneTask":
return &SplitCloneTask{}
case "SplitDiffTask":
return &SplitDiffTask{}
case "VerticalSplitCloneTask":
return &VerticalSplitCloneTask{}
case "VerticalSplitDiffTask":
return &VerticalSplitDiffTask{}
case "WaitForFilteredReplicationTask":
return &WaitForFilteredReplicationTask{}
default:
return nil
}
}
func (s *Scheduler) setTaskCreator(creator taskCreator) {
s.taskCreatorMu.Lock()
defer s.taskCreatorMu.Unlock()
s.taskCreator = creator
}
func (s *Scheduler) validateTaskSpecification(taskName string, parameters map[string]string) error {
taskInstanceForParametersCheck, err := s.createTaskInstance(taskName)
if err != nil {
return err
}
errParameters := validateParameters(taskInstanceForParametersCheck, parameters)
if errParameters != nil {
return errParameters
}
return nil
}
func (s *Scheduler) createTaskInstance(taskName string) (Task, error) {
s.taskCreatorMu.Lock()
taskCreator := s.taskCreator
s.taskCreatorMu.Unlock()
task := taskCreator(taskName)
if task == nil {
return nil, fmt.Errorf("no implementation found for: %v", taskName)
}
return task, nil
}
// validateParameters returns an error if not all required parameters are provided in "parameters".
// Unknown parameters (neither required nor optional) result in an error.
func validateParameters(task Task, parameters map[string]string) error {
validParams := make(map[string]bool)
var missingParams []string
for _, reqParam := range task.RequiredParameters() {
if _, ok := parameters[reqParam]; ok {
validParams[reqParam] = true
} else {
missingParams = append(missingParams, reqParam)
}
}
if len(missingParams) > 0 {
return fmt.Errorf("required parameters are missing: %v", missingParams)
}
for _, optParam := range task.OptionalParameters() {
validParams[optParam] = true
}
for param := range parameters {
if !validParams[param] {
return fmt.Errorf("parameter %v is not allowed. Allowed required parameters: %v optional parameters: %v",
param, task.RequiredParameters(), task.OptionalParameters())
}
}
return nil
}
// EnqueueClusterOperation can be used to start a new cluster operation.
func (s *Scheduler) EnqueueClusterOperation(ctx context.Context, req *automationpb.EnqueueClusterOperationRequest) (*automationpb.EnqueueClusterOperationResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
if s.state != stateRunning {
return nil, fmt.Errorf("scheduler is not running. State: %v", s.state)
}
if !s.registeredClusterOperations[req.Name] {
return nil, fmt.Errorf("no ClusterOperation with name: %v is registered", req.Name)
}
err := s.validateTaskSpecification(req.Name, req.Parameters)
if err != nil {
return nil, err
}
clusterOpID := s.idGenerator.GetNextID()
taskIDGenerator := IDGenerator{}
initialTask := NewTaskContainerWithSingleTask(req.Name, req.Parameters)
clusterOp := NewClusterOperationInstance(clusterOpID, initialTask, &taskIDGenerator)
s.muOpList.Lock()
s.activeClusterOperations[clusterOpID] = clusterOp.Clone()
s.muOpList.Unlock()
s.toBeScheduledClusterOperations <- clusterOp
return &automationpb.EnqueueClusterOperationResponse{
Id: clusterOp.Id,
}, nil
}
// findClusterOp checks for a given ClusterOperation ID if it's in the list of active or finished operations.
func (s *Scheduler) findClusterOp(id string) (ClusterOperationInstance, error) {
var ok bool
var clusterOp ClusterOperationInstance
s.muOpList.Lock()
defer s.muOpList.Unlock()
clusterOp, ok = s.activeClusterOperations[id]
if !ok {
clusterOp, ok = s.finishedClusterOperations[id]
}
if !ok {
return clusterOp, fmt.Errorf("ClusterOperation with id: %v not found", id)
}
return clusterOp.Clone(), nil
}
// Checkpoint should be called every time the state of the cluster op changes.
// It is used to update the copy of the state in activeClusterOperations.
func (s *Scheduler) Checkpoint(clusterOp ClusterOperationInstance) {
// TODO(mberlin): Add here support for persistent checkpoints.
s.muOpList.Lock()
defer s.muOpList.Unlock()
s.activeClusterOperations[clusterOp.Id] = clusterOp.Clone()
}
// GetClusterOperationDetails can be used to query the full details of active or finished operations.
func (s *Scheduler) GetClusterOperationDetails(ctx context.Context, req *automationpb.GetClusterOperationDetailsRequest) (*automationpb.GetClusterOperationDetailsResponse, error) {
clusterOp, err := s.findClusterOp(req.Id)
if err != nil {
return nil, err
}
return &automationpb.GetClusterOperationDetailsResponse{
ClusterOp: clusterOp.ClusterOperation,
}, nil
}
// ShutdownAndWait shuts down the scheduler and waits infinitely until all pending cluster operations have finished.
func (s *Scheduler) ShutdownAndWait
|
index.js
  onParentPriorityChange(priority, _parent) {
// Assume we only have one parent.
this.priority = priority + 1;
}
_attach() {}
_detach() {}
/** Notify observers of a change to our value */
_onChange(value, idle) {
if (idle === void 0) {
idle = false;
}
// Clone "_children" so it can be safely mutated by the loop.
for (const observer of Array.from(this._children)) {
if (is.fun(observer)) {
observer(value, this);
} else {
observer.onParentChange(value, idle, this);
}
}
}
/** Notify observers of a change to our priority */
_onPriorityChange(priority) {
each(this._children, observer => {
if (!is.fun(observer)) {
observer.onParentPriorityChange(priority, this);
}
});
}
/** Reset our node and the nodes of every descendant */
_reset(goal) {
this.node.reset(!this.idle, goal);
each(this._children, observer => {
if (isAnimationValue(observer)) {
observer._reset(goal);
}
});
}
}
/** An object containing `Animated` nodes */
class AnimatedObject extends Animated {
constructor(source) {
if (source === void 0) {
source = null;
}
super();
this.setValue(source);
}
getValue(animated) {
if (!this.source) return null;
const values = {};
each(this.source, (source, key) => {
if (isAnimated(source)) {
values[key] = source.getValue(animated);
} else if (isFluidValue(source)) {
values[key] = source.get();
} else if (!animated) {
values[key] = source;
}
});
return values;
}
/** Replace the raw object data */
setValue(source) {
this.source = source;
this.payload = this._makePayload(source);
}
reset(isActive, _goal) {
if (this.payload) {
each(this.payload, node => node.reset(isActive));
}
}
/** Create a payload set. */
_makePayload(source) {
if (source) {
const payload = new Set();
each(source, this._addToPayload, payload);
return Array.from(payload);
}
}
/** Add to a payload set. */
_addToPayload(source) {
if (isFluidValue(source)) {
if (Animated.context) {
Animated.context.dependencies.add(source);
}
if (isAnimationValue(source)) {
source = source.node;
}
}
if (isAnimated(source)) {
each(source.getPayload(), node => this.add(node));
}
}
}
class AnimatedStyle extends AnimatedObject {
constructor(style) {
super(style || null);
}
setValue(style) {
super.setValue(style && style.transform && createAnimatedTransform ? _extends({}, style, {
transform: createAnimatedTransform(style.transform)
}) : style);
}
}
/** An animated number or a native attribute value */
class AnimatedValue extends Animated {
constructor(_value) {
super();
this._value = _value;
this.done = true;
if (is.num(this._value)) {
this.lastPosition = this._value;
}
}
static create(from, _to) {
return new AnimatedValue(from);
}
getPayload() {
return [this];
}
getValue() {
return this._value;
}
/**
* Set the current value and optionally round it.
*
* The `step` argument does nothing whenever it equals `undefined` or `0`.
* It works with fractions and whole numbers. The best use case is (probably)
* rounding to the pixel grid with a step of:
*
* 1 / window.devicePixelRatio
*/
setValue(value, step) {
if (is.num(value)) {
this.lastPosition = value;
if (step) {
value = Math.round(value / step) * step;
if (this.done) {
this.lastPosition = value;
}
}
}
if (this._value === value) {
return false;
}
this._value = value;
return true;
}
reset(isActive, _goal) {
this.done = false;
if (is.num(this._value)) {
this.elapsedTime = 0;
this.lastPosition = this._value;
if (!isActive) this.lastVelocity = null;
this.v0 = null;
}
}
}
/**
* `Into` springs are memoized interpolators that react to their dependencies.
* The memoized result is updated whenever a dependency changes.
*/
class Into extends AnimationValue {
constructor(
/** The source of input values */
source, args) {
super();
this.source = source;
this.calc = createInterpolator(...args);
this.node = new AnimatedValue(this._compute());
}
get idle() {
return this.node.done;
}
_compute() {
const inputs = is.arr(this.source) ? this.source.map(node => node.get()) : toArray(this.source.get());
return this.calc(...inputs);
}
_attach() {
// Start observing our "source" once we have an observer.
let priority = 0;
each(toArray(this.source), source => {
priority = Math.max(priority, (source.priority || 0) + 1);
source.addChild(this);
});
this.priority = priority;
}
_detach() {
// Stop observing our "source" once we have no observers.
each(toArray(this.source), source => {
source.removeChild(this);
});
}
/** @internal */
onParentChange(_value, idle) {
const node = this.node;
if (idle && !node.done) {
// We're not idle until every source is idle.
node.done = toArray(this.source).every(source => !isAnimationValue(source) || source.idle);
} // TODO: only compute once per frame (note: we'll need to call "onParentChange")
const value = this._compute();
if (!isEqual(value, this.get())) {
node.setValue(value);
this._onChange(value, node.done);
}
}
/** @internal */
onParentPriorityChange(_priority) {
// Set our priority to 1 + the highest parent.
this.priority = toArray(this.source).reduce((max, source) => Math.max(max, (source.priority || 0) + 1), 0);
}
}
Globals.assign({
to: (source, args) => new Into(source, args),
createAnimatedStyle: style => new AnimatedStyle(style)
});
class AnimatedString extends AnimatedValue {
constructor(from, to) {
super(0);
this._string = null;
this._toString = createInterpolator$1({
output: [from, to]
});
}
static create(from, to) {
if (to === void 0) {
to = from;
}
if (is$1.str(from) && is$1.str(to)) {
return new AnimatedString(from, to);
}
throw TypeError('Expected "from" and "to" to be strings');
}
getValue() {
let value = this._string;
return value == null ? this._string = this._toString(this._value) : value;
}
setValue(value) {
if (!is$1.num(value)) {
this._string = value;
this._value = 1;
} else if (super.setValue(value)) {
this._string = null;
} else {
return false;
}
return true;
}
reset(isActive, goal) {
if (goal) {
this._toString = createInterpolator$1({
output: [this.getValue(), goal]
});
}
this._value = 0;
super.reset(isActive);
}
}
/** An array of animated nodes */
class AnimatedArray extends AnimatedObject {
constructor(from, to) {
super(null);
super.setValue(this._makeAnimated(from, to));
}
static create(from, to) {
return new AnimatedArray(from, to);
}
getValue() {
return this.source.map(node => node.getValue());
}
setValue(newValue) {
const payload = this.getPayload(); // Reuse the payload when lengths are equal.
if (newValue && newValue.length == payload.length) {
each(payload, (node, i) => node.setValue(newValue[i]));
} else {
// Remake the payload when length changes.
this.source = this._makeAnimated(newValue);
this.payload = this._makePayload(this.source);
}
}
/** Convert the `from` and `to` values to an array of `Animated` nodes */
_makeAnimated(from, to) {
if (to === void 0) {
to = from;
}
return from ? from.map((from, i) => (needsInterpolation(from) ? AnimatedString : AnimatedValue).create(from, to[i])) : [];
}
}
class AnimatedProps extends AnimatedObject {
constructor(update) {
super(null);
this.update = update;
/** Equals true when a re-render is scheduled for "end of frame" */
this.dirty = false;
}
setValue(props, context) {
if (!props) return; // The constructor passes null.
| if (context) {
Animated.context = context;
}
| random_line_split |
|
index.js | ._priority != priority) {
this._priority = priority;
this._onPriorityChange(priority);
}
}
/** Get the current value */
get() {
// The node doesn't exist until the first update, which normally isn't an
// issue but it can be for tests.
return this.node && this.node.getValue();
}
/** Create a spring that maps our value to another value */
to() {
for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {
args[_key] = arguments[_key];
}
return to(this, args);
}
/** @deprecated Use the `to` method instead. */
interpolate() {
deprecateInterpolate();
for (var _len2 = arguments.length, args = new Array(_len2), _key2 = 0; _key2 < _len2; _key2++) {
args[_key2] = arguments[_key2];
}
return to(this, args);
}
/** @internal */
addChild(child) {
if (!this._children.size) this._attach();
this._children.add(child);
}
/** @internal */
removeChild(child) {
this._children.delete(child);
if (!this._children.size) this._detach();
}
/** @internal */
onParentPriorityChange(priority, _parent) {
// Assume we only have one parent.
this.priority = priority + 1;
}
_attach() {}
_detach() {}
/** Notify observers of a change to our value */
_onChange(value, idle) {
if (idle === void 0) {
idle = false;
}
// Clone "_children" so it can be safely mutated by the loop.
for (const observer of Array.from(this._children)) {
if (is.fun(observer)) {
observer(value, this);
} else {
observer.onParentChange(value, idle, this);
}
}
}
/** Notify observers of a change to our priority */
_onPriorityChange(priority) {
each(this._children, observer => {
if (!is.fun(observer)) {
observer.onParentPriorityChange(priority, this);
}
});
}
/** Reset our node and the nodes of every descendant */
_reset(goal) {
this.node.reset(!this.idle, goal);
each(this._children, observer => {
if (isAnimationValue(observer)) {
observer._reset(goal);
}
});
}
}
/** An object containing `Animated` nodes */
class AnimatedObject extends Animated {
constructor(source) {
if (source === void 0) {
source = null;
}
super();
this.setValue(source);
}
getValue(animated) {
if (!this.source) return null;
const values = {};
each(this.source, (source, key) => {
if (isAnimated(source)) {
values[key] = source.getValue(animated);
} else if (isFluidValue(source)) {
values[key] = source.get();
} else if (!animated) {
values[key] = source;
}
});
return values;
}
/** Replace the raw object data */
setValue(source) {
this.source = source;
this.payload = this._makePayload(source);
}
reset(isActive, _goal) {
if (this.payload) {
each(this.payload, node => node.reset(isActive));
}
}
/** Create a payload set. */
_makePayload(source) {
if (source) {
const payload = new Set();
each(source, this._addToPayload, payload);
return Array.from(payload);
}
}
/** Add to a payload set. */
_addToPayload(source) {
if (isFluidValue(source)) {
if (Animated.context) {
Animated.context.dependencies.add(source);
}
if (isAnimationValue(source)) {
source = source.node;
}
}
if (isAnimated(source)) {
each(source.getPayload(), node => this.add(node));
}
}
}
class AnimatedStyle extends AnimatedObject {
constructor(style) {
super(style || null);
}
setValue(style) {
super.setValue(style && style.transform && createAnimatedTransform ? _extends({}, style, {
transform: createAnimatedTransform(style.transform)
}) : style);
}
}
/** An animated number or a native attribute value */
class AnimatedValue extends Animated {
constructor(_value) {
super();
this._value = _value;
this.done = true;
if (is.num(this._value)) {
this.lastPosition = this._value;
}
}
static create(from, _to) {
return new AnimatedValue(from);
}
getPayload() {
return [this];
}
getValue() {
return this._value;
}
/**
* Set the current value and optionally round it.
*
* The `step` argument does nothing whenever it equals `undefined` or `0`.
* It works with fractions and whole numbers. The best use case is (probably)
* rounding to the pixel grid with a step of:
*
* 1 / window.devicePixelRatio
*/
setValue(value, step) {
if (is.num(value)) {
this.lastPosition = value;
if (step) {
value = Math.round(value / step) * step;
if (this.done) {
this.lastPosition = value;
}
}
}
if (this._value === value) {
return false;
}
this._value = value;
return true;
}
reset(isActive, _goal) {
this.done = false;
if (is.num(this._value)) {
this.elapsedTime = 0;
this.lastPosition = this._value;
if (!isActive) this.lastVelocity = null;
this.v0 = null;
}
}
}
/**
* `Into` springs are memoized interpolators that react to their dependencies.
* The memoized result is updated whenever a dependency changes.
*/
class Into extends AnimationValue {
constructor(
/** The source of input values */
source, args) {
super();
this.source = source;
this.calc = createInterpolator(...args);
this.node = new AnimatedValue(this._compute());
}
get idle() {
return this.node.done;
}
_compute() {
const inputs = is.arr(this.source) ? this.source.map(node => node.get()) : toArray(this.source.get());
return this.calc(...inputs);
}
_attach() {
// Start observing our "source" once we have an observer.
let priority = 0;
each(toArray(this.source), source => {
priority = Math.max(priority, (source.priority || 0) + 1);
source.addChild(this);
});
this.priority = priority;
}
_detach() {
// Stop observing our "source" once we have no observers.
each(toArray(this.source), source => {
source.removeChild(this);
});
}
/** @internal */
onParentChange(_value, idle) {
const node = this.node;
if (idle && !node.done) {
// We're not idle until every source is idle.
node.done = toArray(this.source).every(source => !isAnimationValue(source) || source.idle);
} // TODO: only compute once per frame (note: we'll need to call "onParentChange")
const value = this._compute();
if (!isEqual(value, this.get())) {
node.setValue(value);
this._onChange(value, node.done);
}
}
/** @internal */
onParentPriorityChange(_priority) {
// Set our priority to 1 + the highest parent.
this.priority = toArray(this.source).reduce((max, source) => Math.max(max, (source.priority || 0) + 1), 0);
}
}
Globals.assign({
to: (source, args) => new Into(source, args),
createAnimatedStyle: style => new AnimatedStyle(style)
});
class AnimatedString extends AnimatedValue {
constructor(from, to) {
super(0);
this._string = null;
this._toString = createInterpolator$1({
output: [from, to]
});
}
static create(from, to) {
if (to === void 0) {
to = from;
}
if (is$1.str(from) && is$1.str(to)) {
return new AnimatedString(from, to);
}
throw TypeError('Expected "from" and "to" to be strings');
}
| () {
let value = this._string;
return value == null ? this._string = this._toString(this._value) : value;
}
setValue(value) {
if (!is$1.num(value)) {
this._string = value;
this._value = 1;
} else if (super.setValue(value)) {
this._string = null;
} else {
return false;
}
return true;
}
reset(isActive, goal) {
if (goal) {
this._toString = createInterpolator$1({
output: [this.getValue(), goal]
});
}
this._value = 0;
super.reset(isActive);
}
}
/** An array of animated nodes */
class AnimatedArray extends AnimatedObject {
constructor(from, to) {
super(null);
super.setValue | getValue | identifier_name |
index.js | ._priority != priority) {
this._priority = priority;
this._onPriorityChange(priority);
}
}
/** Get the current value */
get() {
// The node doesn't exist until the first update, which normally isn't an
// issue but it can be for tests.
return this.node && this.node.getValue();
}
/** Create a spring that maps our value to another value */
to() {
for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {
args[_key] = arguments[_key];
}
return to(this, args);
}
/** @deprecated Use the `to` method instead. */
interpolate() {
deprecateInterpolate();
for (var _len2 = arguments.length, args = new Array(_len2), _key2 = 0; _key2 < _len2; _key2++) {
args[_key2] = arguments[_key2];
}
return to(this, args);
}
/** @internal */
addChild(child) {
if (!this._children.size) this._attach();
this._children.add(child);
}
/** @internal */
removeChild(child) {
this._children.delete(child);
if (!this._children.size) this._detach();
}
/** @internal */
onParentPriorityChange(priority, _parent) {
// Assume we only have one parent.
this.priority = priority + 1;
}
_attach() {}
_detach() {}
/** Notify observers of a change to our value */
_onChange(value, idle) {
if (idle === void 0) {
idle = false;
}
// Clone "_children" so it can be safely mutated by the loop.
for (const observer of Array.from(this._children)) {
if (is.fun(observer)) {
observer(value, this);
} else {
observer.onParentChange(value, idle, this);
}
}
}
/** Notify observers of a change to our priority */
_onPriorityChange(priority) {
each(this._children, observer => {
if (!is.fun(observer)) |
});
}
/** Reset our node and the nodes of every descendant */
_reset(goal) {
this.node.reset(!this.idle, goal);
each(this._children, observer => {
if (isAnimationValue(observer)) {
observer._reset(goal);
}
});
}
}
/** An object containing `Animated` nodes */
class AnimatedObject extends Animated {
constructor(source) {
if (source === void 0) {
source = null;
}
super();
this.setValue(source);
}
getValue(animated) {
if (!this.source) return null;
const values = {};
each(this.source, (source, key) => {
if (isAnimated(source)) {
values[key] = source.getValue(animated);
} else if (isFluidValue(source)) {
values[key] = source.get();
} else if (!animated) {
values[key] = source;
}
});
return values;
}
/** Replace the raw object data */
setValue(source) {
this.source = source;
this.payload = this._makePayload(source);
}
reset(isActive, _goal) {
if (this.payload) {
each(this.payload, node => node.reset(isActive));
}
}
/** Create a payload set. */
_makePayload(source) {
if (source) {
const payload = new Set();
each(source, this._addToPayload, payload);
return Array.from(payload);
}
}
/** Add to a payload set. */
_addToPayload(source) {
if (isFluidValue(source)) {
if (Animated.context) {
Animated.context.dependencies.add(source);
}
if (isAnimationValue(source)) {
source = source.node;
}
}
if (isAnimated(source)) {
each(source.getPayload(), node => this.add(node));
}
}
}
class AnimatedStyle extends AnimatedObject {
constructor(style) {
super(style || null);
}
setValue(style) {
super.setValue(style && style.transform && createAnimatedTransform ? _extends({}, style, {
transform: createAnimatedTransform(style.transform)
}) : style);
}
}
/** An animated number or a native attribute value */
class AnimatedValue extends Animated {
constructor(_value) {
super();
this._value = _value;
this.done = true;
if (is.num(this._value)) {
this.lastPosition = this._value;
}
}
static create(from, _to) {
return new AnimatedValue(from);
}
getPayload() {
return [this];
}
getValue() {
return this._value;
}
/**
* Set the current value and optionally round it.
*
* The `step` argument does nothing whenever it equals `undefined` or `0`.
* It works with fractions and whole numbers. The best use case is (probably)
* rounding to the pixel grid with a step of:
*
* 1 / window.devicePixelRatio
*/
setValue(value, step) {
if (is.num(value)) {
this.lastPosition = value;
if (step) {
value = Math.round(value / step) * step;
if (this.done) {
this.lastPosition = value;
}
}
}
if (this._value === value) {
return false;
}
this._value = value;
return true;
}
reset(isActive, _goal) {
this.done = false;
if (is.num(this._value)) {
this.elapsedTime = 0;
this.lastPosition = this._value;
if (!isActive) this.lastVelocity = null;
this.v0 = null;
}
}
}
/**
* `Into` springs are memoized interpolators that react to their dependencies.
* The memoized result is updated whenever a dependency changes.
*/
class Into extends AnimationValue {
constructor(
/** The source of input values */
source, args) {
super();
this.source = source;
this.calc = createInterpolator(...args);
this.node = new AnimatedValue(this._compute());
}
get idle() {
return this.node.done;
}
_compute() {
const inputs = is.arr(this.source) ? this.source.map(node => node.get()) : toArray(this.source.get());
return this.calc(...inputs);
}
_attach() {
// Start observing our "source" once we have an observer.
let priority = 0;
each(toArray(this.source), source => {
priority = Math.max(priority, (source.priority || 0) + 1);
source.addChild(this);
});
this.priority = priority;
}
_detach() {
// Stop observing our "source" once we have no observers.
each(toArray(this.source), source => {
source.removeChild(this);
});
}
/** @internal */
onParentChange(_value, idle) {
const node = this.node;
if (idle && !node.done) {
// We're not idle until every source is idle.
node.done = toArray(this.source).every(source => !isAnimationValue(source) || source.idle);
} // TODO: only compute once per frame (note: we'll need to call "onParentChange")
const value = this._compute();
if (!isEqual(value, this.get())) {
node.setValue(value);
this._onChange(value, node.done);
}
}
/** @internal */
onParentPriorityChange(_priority) {
// Set our priority to 1 + the highest parent.
this.priority = toArray(this.source).reduce((max, source) => Math.max(max, (source.priority || 0) + 1), 0);
}
}
Globals.assign({
to: (source, args) => new Into(source, args),
createAnimatedStyle: style => new AnimatedStyle(style)
});
class AnimatedString extends AnimatedValue {
constructor(from, to) {
super(0);
this._string = null;
this._toString = createInterpolator$1({
output: [from, to]
});
}
static create(from, to) {
if (to === void 0) {
to = from;
}
if (is$1.str(from) && is$1.str(to)) {
return new AnimatedString(from, to);
}
throw TypeError('Expected "from" and "to" to be strings');
}
getValue() {
let value = this._string;
return value == null ? this._string = this._toString(this._value) : value;
}
setValue(value) {
if (!is$1.num(value)) {
this._string = value;
this._value = 1;
} else if (super.setValue(value)) {
this._string = null;
} else {
return false;
}
return true;
}
reset(isActive, goal) {
if (goal) {
this._toString = createInterpolator$1({
output: [this.getValue(), goal]
});
}
this._value = 0;
super.reset(isActive);
}
}
/** An array of animated nodes */
class AnimatedArray extends AnimatedObject {
constructor(from, to) {
super(null);
| {
observer.onParentPriorityChange(priority, this);
} | conditional_block |
train.py |
def main(args):
# Create model directory
if not os.path.exists(args.model_path):
os.makedirs(args.model_path)
# Image preprocessing
# For normalization, see https://github.com/pytorch/vision#models
transform = transforms.Compose([
transforms.RandomCrop(args.crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))])
# Load vocabulary wrapper.
with open(args.vocab_path, 'rb') as f:
vocab = pickle.load(f)
#read rationalization data
rationalizations = []
max_length = 0
lengths = []
bad_worker_ids = ['A2CNSIECB9UP05','A23782O23HSPLA','A2F9ZBSR6AXXND','A3GI86L18Z71XY','AIXTI8PKSX1D2','A2QWHXMFQI18GQ','A3SB7QYI84HYJT',
'A2Q2A7AB6MMFLI','A2P1KI42CJVNIA','A1IJXPKZTJV809','A2WZ0RZMKQ2WGJ','A3EKETMVGU2PM9','A1OCEC1TBE3CWA','AE1RYK54MH11G','A2ADEPVGNNXNPA',
'A15QGLWS8CNJFU','A18O3DEA5Z4MJD','AAAL4RENVAPML','A3TZBZ92CQKQLG','ABO9F0JD9NN54','A8F6JFG0WSELT','ARN9ET3E608LJ','A2TCYNRAZWK8CC',
'A32BK0E1IPDUAF','ANNV3E6CIVCW4']
with open('./Log/Rationalizations.txt') as f:
for line in f:
line = line.lower()
line = re.sub('[^a-z\ \']+', " ", line)
words = line.split()
length = len(words)
lengths.append(length)
if length>max_length:
max_length = length
for index,word in enumerate(words):
words[index] = vocab.word2idx[word]
rationalizations.append(words)
# max_length = max(rationalizations,key=len
rationalizations=[np.array(xi) for xi in rationalizations]
# for index,r in enumerate(rationalizations):
# # print(max_length)
# r = np.lib.pad(r,(0,max_length - len(r)),'constant')
# rationalizations[index] = r
# rationalizations = np.vstack(rationalizations)
# print(rationalizations)
# print(rationalizations.shape)
# print(torch.from_numpy(rationalizations))
# rationalizations = torch.from_numpy(rationalizations)
# print(np.asarray(rationalizations).reshape(rationalizations.shape,rationalizations.shape))
# Build data loader
data_loader = get_loader(args.image_dir, args.caption_path, vocab,
transform, args.batch_size,
shuffle=True, num_workers=args.num_workers)
# Build the models
encoder = EncoderCNN(args.embed_size)
decoder = DecoderRNN(args.embed_size, args.hidden_size,
len(vocab), args.num_layers)
if torch.cuda.is_available():
encoder.cuda()
decoder.cuda()
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
params = list(decoder.parameters()) + list(encoder.linear.parameters()) + list(encoder.bn.parameters())
optimizer = torch.optim.Adam(params, lr=args.learning_rate)
frogger_data_loader = get_images('./data/FroggerDataset/',args.batch_size,transform)
# exit(0)
# Train the Models
# data = iter(frogger_data_loader)
# imgs = data.next()[0]
# print(imgs)
# print(frogger_data_loader[0])
# exit(0)
# for i,(images) in enumerate(frogger_data_loader):
# print(images)
total_step = len(frogger_data_loader)
for epoch in range(args.num_epochs):
for i,x in enumerate(frogger_data_loader):
# print(x)
# print(x[0])
# exit(0)
# print(x[0])
# exit(0)
images = to_var(x[0], volatile=True)
print(images[0][1])
exit(0)
captions = []
max_length = max(lengths[i:i+2])
rats = rationalizations[i:i+2]
rats.sort(key = lambda s: len(s))
rats.reverse()
# print(rats)
# exit(0)
for index,r in enumerate(rats):
# print(max_length)
r = np.lib.pad(r,(0,max_length - len(r)),'constant')
captions.append(r)
# rationalizations = np.vstack(rationalizations)
# captions.sort(key = lambda s: len(s))
captions = to_var(torch.from_numpy(np.asarray(captions)))
# lengths.append(len(rationalizations[i]))
new_lengths = []
# new_lengths.append(lengths[i])
new_lengths = lengths[i:i+2]
new_lengths.sort()
new_lengths.reverse()
captions = captions
# print(captions)
# print(new_lengths)
targets = pack_padded_sequence(captions, new_lengths, batch_first=True)[0]
decoder.zero_grad()
encoder.zero_grad()
# print(images)
features = encoder(images)
# print(features)
# print(rats)
# print(len(lengths))
outputs = decoder(features, captions, new_lengths)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
# Print log info
if i % args.log_step == 0:
print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f'
%(epoch, args.num_epochs, i, total_step,
loss.data[0], np.exp(loss.data[0])))
# Save the models
if (i+1) % args.save_step == 0:
torch.save(decoder.state_dict(),
os.path.join(args.model_path,
'decoder-%d-%d.pkl' %(epoch+1, i+1)))
torch.save(encoder.state_dict(),
os.path.join(args.model_path,
'encoder-%d-%d.pkl' %(epoch+1, i+1)))
# exit(0)
# total_step = len(data_loader)
# for epoch in range(args.num_epochs):
# for i, (images, captions, lengths) in enumerate(data_loader):
# # print(captions)
# # print(images)
# # print(lengths)
# # print(captions)
# # # print(images)
# # exit(0)
# # Set mini-batch dataset
# images = to_var(images, volatile=True)
# print(captions)
# captions = to_var(captions)
# print(captions)
# print(lengths)
# targets = pack_padded_sequence(captions, lengths, batch_first=True)[0]
# # Forward, Backward and Optimize
# decoder.zero_grad()
# encoder.zero_grad()
# print(images)
# features = encoder(images)
# print(features)
# exit(0)
# outputs = decoder(features, captions, lengths)
# loss = criterion(outputs, targets)
# loss.backward()
# optimizer.step()
# # Print log info
# if i % args.log_step == 0:
# print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f'
# %(epoch, args.num_epochs, i, total_step,
# loss.data[0], np.exp(loss.data[0])))
# # Save the models
# if (i+1) % args.save_step == 0:
# torch.save(decoder.state_dict(),
# os.path.join(args.model_path,
# 'decoder-%d-%d.pkl' %(epoch+1, i+1)))
# torch.save(encoder.state_dict(),
# os.path.join(args.model_path,
# 'encoder-%d-%d.pkl' %(epoch+1, i+1)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str, default='./models/' ,
help='path for saving trained models')
parser.add_argument('--crop_size', type=int, default=224 ,
help='size for randomly cropping images')
parser.add_argument('--vocab_path', type=str, default='./data/vocab_frogger | if torch.cuda.is_available():
x = x.cuda()
return Variable(x, volatile=volatile) | identifier_body |
|
train.py | # Create model directory
if not os.path.exists(args.model_path):
os.makedirs(args.model_path)
# Image preprocessing
# For normalization, see https://github.com/pytorch/vision#models
transform = transforms.Compose([
transforms.RandomCrop(args.crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))])
# Load vocabulary wrapper.
with open(args.vocab_path, 'rb') as f:
vocab = pickle.load(f)
#read rationalization data
rationalizations = []
max_length = 0
lengths = []
bad_worker_ids = ['A2CNSIECB9UP05','A23782O23HSPLA','A2F9ZBSR6AXXND','A3GI86L18Z71XY','AIXTI8PKSX1D2','A2QWHXMFQI18GQ','A3SB7QYI84HYJT',
'A2Q2A7AB6MMFLI','A2P1KI42CJVNIA','A1IJXPKZTJV809','A2WZ0RZMKQ2WGJ','A3EKETMVGU2PM9','A1OCEC1TBE3CWA','AE1RYK54MH11G','A2ADEPVGNNXNPA',
'A15QGLWS8CNJFU','A18O3DEA5Z4MJD','AAAL4RENVAPML','A3TZBZ92CQKQLG','ABO9F0JD9NN54','A8F6JFG0WSELT','ARN9ET3E608LJ','A2TCYNRAZWK8CC',
'A32BK0E1IPDUAF','ANNV3E6CIVCW4']
with open('./Log/Rationalizations.txt') as f:
for line in f:
line = line.lower()
line = re.sub('[^a-z\ \']+', " ", line)
words = line.split()
length = len(words)
lengths.append(length)
if length>max_length:
|
for index,word in enumerate(words):
words[index] = vocab.word2idx[word]
rationalizations.append(words)
# max_length = max(rationalizations,key=len
rationalizations=[np.array(xi) for xi in rationalizations]
# for index,r in enumerate(rationalizations):
# # print(max_length)
# r = np.lib.pad(r,(0,max_length - len(r)),'constant')
# rationalizations[index] = r
# rationalizations = np.vstack(rationalizations)
# print(rationalizations)
# print(rationalizations.shape)
# print(torch.from_numpy(rationalizations))
# rationalizations = torch.from_numpy(rationalizations)
# print(np.asarray(rationalizations).reshape(rationalizations.shape,rationalizations.shape))
# Build data loader
data_loader = get_loader(args.image_dir, args.caption_path, vocab,
transform, args.batch_size,
shuffle=True, num_workers=args.num_workers)
# Build the models
encoder = EncoderCNN(args.embed_size)
decoder = DecoderRNN(args.embed_size, args.hidden_size,
len(vocab), args.num_layers)
if torch.cuda.is_available():
encoder.cuda()
decoder.cuda()
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
params = list(decoder.parameters()) + list(encoder.linear.parameters()) + list(encoder.bn.parameters())
optimizer = torch.optim.Adam(params, lr=args.learning_rate)
frogger_data_loader = get_images('./data/FroggerDataset/',args.batch_size,transform)
# exit(0)
# Train the Models
# data = iter(frogger_data_loader)
# imgs = data.next()[0]
# print(imgs)
# print(frogger_data_loader[0])
# exit(0)
# for i,(images) in enumerate(frogger_data_loader):
# print(images)
total_step = len(frogger_data_loader)
for epoch in range(args.num_epochs):
for i,x in enumerate(frogger_data_loader):
# print(x)
# print(x[0])
# exit(0)
# print(x[0])
# exit(0)
images = to_var(x[0], volatile=True)
print(images[0][1])
exit(0)
captions = []
max_length = max(lengths[i:i+2])
rats = rationalizations[i:i+2]
rats.sort(key = lambda s: len(s))
rats.reverse()
# print(rats)
# exit(0)
for index,r in enumerate(rats):
# print(max_length)
r = np.lib.pad(r,(0,max_length - len(r)),'constant')
captions.append(r)
# rationalizations = np.vstack(rationalizations)
# captions.sort(key = lambda s: len(s))
captions = to_var(torch.from_numpy(np.asarray(captions)))
# lengths.append(len(rationalizations[i]))
new_lengths = []
# new_lengths.append(lengths[i])
new_lengths = lengths[i:i+2]
new_lengths.sort()
new_lengths.reverse()
captions = captions
# print(captions)
# print(new_lengths)
targets = pack_padded_sequence(captions, new_lengths, batch_first=True)[0]
decoder.zero_grad()
encoder.zero_grad()
# print(images)
features = encoder(images)
# print(features)
# print(rats)
# print(len(lengths))
outputs = decoder(features, captions, new_lengths)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
# Print log info
if i % args.log_step == 0:
print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f'
%(epoch, args.num_epochs, i, total_step,
loss.data[0], np.exp(loss.data[0])))
# Save the models
if (i+1) % args.save_step == 0:
torch.save(decoder.state_dict(),
os.path.join(args.model_path,
'decoder-%d-%d.pkl' %(epoch+1, i+1)))
torch.save(encoder.state_dict(),
os.path.join(args.model_path,
'encoder-%d-%d.pkl' %(epoch+1, i+1)))
# exit(0)
# total_step = len(data_loader)
# for epoch in range(args.num_epochs):
# for i, (images, captions, lengths) in enumerate(data_loader):
# # print(captions)
# # print(images)
# # print(lengths)
# # print(captions)
# # # print(images)
# # exit(0)
# # Set mini-batch dataset
# images = to_var(images, volatile=True)
# print(captions)
# captions = to_var(captions)
# print(captions)
# print(lengths)
# targets = pack_padded_sequence(captions, lengths, batch_first=True)[0]
# # Forward, Backward and Optimize
# decoder.zero_grad()
# encoder.zero_grad()
# print(images)
# features = encoder(images)
# print(features)
# exit(0)
# outputs = decoder(features, captions, lengths)
# loss = criterion(outputs, targets)
# loss.backward()
# optimizer.step()
# # Print log info
# if i % args.log_step == 0:
# print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f'
# %(epoch, args.num_epochs, i, total_step,
# loss.data[0], np.exp(loss.data[0])))
# # Save the models
# if (i+1) % args.save_step == 0:
# torch.save(decoder.state_dict(),
# os.path.join(args.model_path,
# 'decoder-%d-%d.pkl' %(epoch+1, i+1)))
# torch.save(encoder.state_dict(),
# os.path.join(args.model_path,
# 'encoder-%d-%d.pkl' %(epoch+1, i+1)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str, default='./models/' ,
help='path for saving trained models')
parser.add_argument('--crop_size', type=int, default=224 ,
help='size for randomly cropping images')
parser.add_argument('--vocab_path', type=str, default='./data/vocab_frogger.pkl',
help='path for vocabulary wrapper')
parser.add_argument('--image_dir', type=str, default='./data/resized | max_length = length | conditional_block |
train.py | # Create model directory
if not os.path.exists(args.model_path):
os.makedirs(args.model_path)
# Image preprocessing
# For normalization, see https://github.com/pytorch/vision#models
transform = transforms.Compose([
transforms.RandomCrop(args.crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))])
# Load vocabulary wrapper.
with open(args.vocab_path, 'rb') as f:
vocab = pickle.load(f)
#read rationalization data
rationalizations = []
max_length = 0
lengths = []
bad_worker_ids = ['A2CNSIECB9UP05','A23782O23HSPLA','A2F9ZBSR6AXXND','A3GI86L18Z71XY','AIXTI8PKSX1D2','A2QWHXMFQI18GQ','A3SB7QYI84HYJT',
'A2Q2A7AB6MMFLI','A2P1KI42CJVNIA','A1IJXPKZTJV809','A2WZ0RZMKQ2WGJ','A3EKETMVGU2PM9','A1OCEC1TBE3CWA','AE1RYK54MH11G','A2ADEPVGNNXNPA',
'A15QGLWS8CNJFU','A18O3DEA5Z4MJD','AAAL4RENVAPML','A3TZBZ92CQKQLG','ABO9F0JD9NN54','A8F6JFG0WSELT','ARN9ET3E608LJ','A2TCYNRAZWK8CC',
'A32BK0E1IPDUAF','ANNV3E6CIVCW4']
with open('./Log/Rationalizations.txt') as f:
for line in f:
line = line.lower()
line = re.sub('[^a-z\ \']+', " ", line)
words = line.split()
length = len(words)
lengths.append(length)
if length>max_length:
max_length = length
for index,word in enumerate(words):
words[index] = vocab.word2idx[word]
rationalizations.append(words)
# max_length = max(rationalizations,key=len
rationalizations=[np.array(xi) for xi in rationalizations]
# for index,r in enumerate(rationalizations):
# # print(max_length)
# r = np.lib.pad(r,(0,max_length - len(r)),'constant')
# rationalizations[index] = r
# rationalizations = np.vstack(rationalizations)
# print(rationalizations)
# print(rationalizations.shape)
# print(torch.from_numpy(rationalizations))
# rationalizations = torch.from_numpy(rationalizations)
# print(np.asarray(rationalizations).reshape(rationalizations.shape,rationalizations.shape))
# Build data loader
data_loader = get_loader(args.image_dir, args.caption_path, vocab,
transform, args.batch_size,
shuffle=True, num_workers=args.num_workers)
# Build the models
encoder = EncoderCNN(args.embed_size)
decoder = DecoderRNN(args.embed_size, args.hidden_size,
len(vocab), args.num_layers)
if torch.cuda.is_available():
encoder.cuda()
decoder.cuda()
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
params = list(decoder.parameters()) + list(encoder.linear.parameters()) + list(encoder.bn.parameters())
optimizer = torch.optim.Adam(params, lr=args.learning_rate)
frogger_data_loader = get_images('./data/FroggerDataset/',args.batch_size,transform)
# exit(0)
# Train the Models
# data = iter(frogger_data_loader)
# imgs = data.next()[0]
# print(imgs)
# print(frogger_data_loader[0])
# exit(0)
# for i,(images) in enumerate(frogger_data_loader):
# print(images)
total_step = len(frogger_data_loader)
for epoch in range(args.num_epochs):
for i,x in enumerate(frogger_data_loader):
# print(x)
# print(x[0])
# exit(0)
# print(x[0])
# exit(0)
images = to_var(x[0], volatile=True)
print(images[0][1])
exit(0)
captions = []
max_length = max(lengths[i:i+2])
rats = rationalizations[i:i+2]
rats.sort(key = lambda s: len(s))
rats.reverse()
# print(rats)
# exit(0)
for index,r in enumerate(rats):
# print(max_length)
r = np.lib.pad(r,(0,max_length - len(r)),'constant')
captions.append(r)
# rationalizations = np.vstack(rationalizations)
# captions.sort(key = lambda s: len(s))
captions = to_var(torch.from_numpy(np.asarray(captions)))
# lengths.append(len(rationalizations[i]))
new_lengths = []
# new_lengths.append(lengths[i])
new_lengths = lengths[i:i+2]
new_lengths.sort() | new_lengths.reverse()
captions = captions
# print(captions)
# print(new_lengths)
targets = pack_padded_sequence(captions, new_lengths, batch_first=True)[0]
decoder.zero_grad()
encoder.zero_grad()
# print(images)
features = encoder(images)
# print(features)
# print(rats)
# print(len(lengths))
outputs = decoder(features, captions, new_lengths)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
# Print log info
if i % args.log_step == 0:
print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f'
%(epoch, args.num_epochs, i, total_step,
loss.data[0], np.exp(loss.data[0])))
# Save the models
if (i+1) % args.save_step == 0:
torch.save(decoder.state_dict(),
os.path.join(args.model_path,
'decoder-%d-%d.pkl' %(epoch+1, i+1)))
torch.save(encoder.state_dict(),
os.path.join(args.model_path,
'encoder-%d-%d.pkl' %(epoch+1, i+1)))
# exit(0)
# total_step = len(data_loader)
# for epoch in range(args.num_epochs):
# for i, (images, captions, lengths) in enumerate(data_loader):
# # print(captions)
# # print(images)
# # print(lengths)
# # print(captions)
# # # print(images)
# # exit(0)
# # Set mini-batch dataset
# images = to_var(images, volatile=True)
# print(captions)
# captions = to_var(captions)
# print(captions)
# print(lengths)
# targets = pack_padded_sequence(captions, lengths, batch_first=True)[0]
# # Forward, Backward and Optimize
# decoder.zero_grad()
# encoder.zero_grad()
# print(images)
# features = encoder(images)
# print(features)
# exit(0)
# outputs = decoder(features, captions, lengths)
# loss = criterion(outputs, targets)
# loss.backward()
# optimizer.step()
# # Print log info
# if i % args.log_step == 0:
# print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f'
# %(epoch, args.num_epochs, i, total_step,
# loss.data[0], np.exp(loss.data[0])))
# # Save the models
# if (i+1) % args.save_step == 0:
# torch.save(decoder.state_dict(),
# os.path.join(args.model_path,
# 'decoder-%d-%d.pkl' %(epoch+1, i+1)))
# torch.save(encoder.state_dict(),
# os.path.join(args.model_path,
# 'encoder-%d-%d.pkl' %(epoch+1, i+1)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str, default='./models/' ,
help='path for saving trained models')
parser.add_argument('--crop_size', type=int, default=224 ,
help='size for randomly cropping images')
parser.add_argument('--vocab_path', type=str, default='./data/vocab_frogger.pkl',
help='path for vocabulary wrapper')
parser.add_argument('--image_dir', type=str, default='./data/resized | random_line_split |
|
train.py | (x, volatile=False):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x, volatile=volatile)
def main(args):
# Create model directory
if not os.path.exists(args.model_path):
os.makedirs(args.model_path)
# Image preprocessing
# For normalization, see https://github.com/pytorch/vision#models
transform = transforms.Compose([
transforms.RandomCrop(args.crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))])
# Load vocabulary wrapper.
with open(args.vocab_path, 'rb') as f:
vocab = pickle.load(f)
#read rationalization data
rationalizations = []
max_length = 0
lengths = []
bad_worker_ids = ['A2CNSIECB9UP05','A23782O23HSPLA','A2F9ZBSR6AXXND','A3GI86L18Z71XY','AIXTI8PKSX1D2','A2QWHXMFQI18GQ','A3SB7QYI84HYJT',
'A2Q2A7AB6MMFLI','A2P1KI42CJVNIA','A1IJXPKZTJV809','A2WZ0RZMKQ2WGJ','A3EKETMVGU2PM9','A1OCEC1TBE3CWA','AE1RYK54MH11G','A2ADEPVGNNXNPA',
'A15QGLWS8CNJFU','A18O3DEA5Z4MJD','AAAL4RENVAPML','A3TZBZ92CQKQLG','ABO9F0JD9NN54','A8F6JFG0WSELT','ARN9ET3E608LJ','A2TCYNRAZWK8CC',
'A32BK0E1IPDUAF','ANNV3E6CIVCW4']
with open('./Log/Rationalizations.txt') as f:
for line in f:
line = line.lower()
line = re.sub('[^a-z\ \']+', " ", line)
words = line.split()
length = len(words)
lengths.append(length)
if length>max_length:
max_length = length
for index,word in enumerate(words):
words[index] = vocab.word2idx[word]
rationalizations.append(words)
# max_length = max(rationalizations,key=len
rationalizations=[np.array(xi) for xi in rationalizations]
# for index,r in enumerate(rationalizations):
# # print(max_length)
# r = np.lib.pad(r,(0,max_length - len(r)),'constant')
# rationalizations[index] = r
# rationalizations = np.vstack(rationalizations)
# print(rationalizations)
# print(rationalizations.shape)
# print(torch.from_numpy(rationalizations))
# rationalizations = torch.from_numpy(rationalizations)
# print(np.asarray(rationalizations).reshape(rationalizations.shape,rationalizations.shape))
# Build data loader
data_loader = get_loader(args.image_dir, args.caption_path, vocab,
transform, args.batch_size,
shuffle=True, num_workers=args.num_workers)
# Build the models
encoder = EncoderCNN(args.embed_size)
decoder = DecoderRNN(args.embed_size, args.hidden_size,
len(vocab), args.num_layers)
if torch.cuda.is_available():
encoder.cuda()
decoder.cuda()
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
params = list(decoder.parameters()) + list(encoder.linear.parameters()) + list(encoder.bn.parameters())
optimizer = torch.optim.Adam(params, lr=args.learning_rate)
frogger_data_loader = get_images('./data/FroggerDataset/',args.batch_size,transform)
# exit(0)
# Train the Models
# data = iter(frogger_data_loader)
# imgs = data.next()[0]
# print(imgs)
# print(frogger_data_loader[0])
# exit(0)
# for i,(images) in enumerate(frogger_data_loader):
# print(images)
total_step = len(frogger_data_loader)
for epoch in range(args.num_epochs):
for i,x in enumerate(frogger_data_loader):
# print(x)
# print(x[0])
# exit(0)
# print(x[0])
# exit(0)
images = to_var(x[0], volatile=True)
print(images[0][1])
exit(0)
captions = []
max_length = max(lengths[i:i+2])
rats = rationalizations[i:i+2]
rats.sort(key = lambda s: len(s))
rats.reverse()
# print(rats)
# exit(0)
for index,r in enumerate(rats):
# print(max_length)
r = np.lib.pad(r,(0,max_length - len(r)),'constant')
captions.append(r)
# rationalizations = np.vstack(rationalizations)
# captions.sort(key = lambda s: len(s))
captions = to_var(torch.from_numpy(np.asarray(captions)))
# lengths.append(len(rationalizations[i]))
new_lengths = []
# new_lengths.append(lengths[i])
new_lengths = lengths[i:i+2]
new_lengths.sort()
new_lengths.reverse()
captions = captions
# print(captions)
# print(new_lengths)
targets = pack_padded_sequence(captions, new_lengths, batch_first=True)[0]
decoder.zero_grad()
encoder.zero_grad()
# print(images)
features = encoder(images)
# print(features)
# print(rats)
# print(len(lengths))
outputs = decoder(features, captions, new_lengths)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
# Print log info
if i % args.log_step == 0:
print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f'
%(epoch, args.num_epochs, i, total_step,
loss.data[0], np.exp(loss.data[0])))
# Save the models
if (i+1) % args.save_step == 0:
torch.save(decoder.state_dict(),
os.path.join(args.model_path,
'decoder-%d-%d.pkl' %(epoch+1, i+1)))
torch.save(encoder.state_dict(),
os.path.join(args.model_path,
'encoder-%d-%d.pkl' %(epoch+1, i+1)))
# exit(0)
# total_step = len(data_loader)
# for epoch in range(args.num_epochs):
# for i, (images, captions, lengths) in enumerate(data_loader):
# # print(captions)
# # print(images)
# # print(lengths)
# # print(captions)
# # # print(images)
# # exit(0)
# # Set mini-batch dataset
# images = to_var(images, volatile=True)
# print(captions)
# captions = to_var(captions)
# print(captions)
# print(lengths)
# targets = pack_padded_sequence(captions, lengths, batch_first=True)[0]
# # Forward, Backward and Optimize
# decoder.zero_grad()
# encoder.zero_grad()
# print(images)
# features = encoder(images)
# print(features)
# exit(0)
# outputs = decoder(features, captions, lengths)
# loss = criterion(outputs, targets)
# loss.backward()
# optimizer.step()
# # Print log info
# if i % args.log_step == 0:
# print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f'
# %(epoch, args.num_epochs, i, total_step,
# loss.data[0], np.exp(loss.data[0])))
# # Save the models
# if (i+1) % args.save_step == 0:
# torch.save(decoder.state_dict(),
# os.path.join(args.model_path,
# 'decoder-%d-%d.pkl' %(epoch+1, i+1)))
# torch.save(encoder.state_dict(),
# os.path.join(args.model_path,
# 'encoder-%d-%d.pkl' %(epoch+1, i+1)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str, default='./models/' ,
help='path for saving trained models')
parser.add_argument('--crop_size', type=int, default=224 ,
help='size for randomly cropping images')
parser.add_argument('--vocab_path', type=str, | to_var | identifier_name |
|
tasks.py | mailchimp for list id " + list_id)
else:
logger.info(
"cleared " + str(delete_count) + " out of " + str(
active_subscribed.count()) + " for list id " + list_id)
if active_subscribed.count() == add_count:
logger.info("successfully synced all subscriptions for list id " + list_id)
else:
|
@periodic_task(ignore_result=True, run_every=crontab(minute=0, hour=0))
def manage_task_nightly():
# The nightly running task do DOI activation check
# Check DOI activation on failed and pending resources and send email.
msg_lst = []
# retrieve all published resources with failed metadata deposition with CrossRef if any and
# retry metadata deposition
failed_resources = BaseResource.objects.filter(raccess__published=True, doi__contains='failure')
for res in failed_resources:
if res.metadata.dates.all().filter(type='published'):
pub_date = res.metadata.dates.all().filter(type='published')[0]
pub_date = pub_date.start_date.strftime('%m/%d/%Y')
act_doi = get_activated_doi(res.doi)
response = deposit_res_metadata_with_crossref(res)
if response.status_code == status.HTTP_200_OK:
# retry of metadata deposition succeeds, change resource flag from failure
# to pending
res.doi = get_resource_doi(act_doi, 'pending')
res.save()
else:
# retry of metadata deposition failed again, notify admin
msg_lst.append("Metadata deposition with CrossRef for the published resource "
"DOI {res_doi} failed again after retry with first metadata "
"deposition requested since {pub_date}.".format(res_doi=act_doi,
pub_date=pub_date))
logger.debug(response.content)
else:
msg_lst.append("{res_id} does not have published date in its metadata.".format(
res_id=res.short_id))
pending_resources = BaseResource.objects.filter(raccess__published=True,
doi__contains='pending')
for res in pending_resources:
if res.metadata.dates.all().filter(type='published'):
pub_date = res.metadata.dates.all().filter(type='published')[0]
pub_date = pub_date.start_date.strftime('%m/%d/%Y')
act_doi = get_activated_doi(res.doi)
main_url = get_crossref_url()
req_str = '{MAIN_URL}servlet/submissionDownload?usr={USERNAME}&pwd=' \
'{PASSWORD}&doi_batch_id={DOI_BATCH_ID}&type={TYPE}'
response = requests.get(req_str.format(MAIN_URL=main_url,
USERNAME=settings.CROSSREF_LOGIN_ID,
PASSWORD=settings.CROSSREF_LOGIN_PWD,
DOI_BATCH_ID=res.short_id,
TYPE='result'))
root = ElementTree.fromstring(response.content)
rec_cnt_elem = root.find('.//record_count')
failure_cnt_elem = root.find('.//failure_count')
success = False
if rec_cnt_elem is not None and failure_cnt_elem is not None:
rec_cnt = int(rec_cnt_elem.text)
failure_cnt = int(failure_cnt_elem.text)
if rec_cnt > 0 and failure_cnt == 0:
res.doi = act_doi
res.save()
success = True
if not success:
msg_lst.append("Published resource DOI {res_doi} is not yet activated with request "
"data deposited since {pub_date}.".format(res_doi=act_doi,
pub_date=pub_date))
logger.debug(response.content)
else:
msg_lst.append("{res_id} does not have published date in its metadata.".format(
res_id=res.short_id))
if msg_lst:
email_msg = '\n'.join(msg_lst)
subject = 'Notification of pending DOI deposition/activation of published resources'
# send email for people monitoring and follow-up as needed
send_mail(subject, email_msg, settings.DEFAULT_FROM_EMAIL, [settings.DEFAULT_SUPPORT_EMAIL])
@periodic_task(ignore_result=True, run_every=crontab(minute=15, hour=0, day_of_week=1,
day_of_month='1-7'))
def send_over_quota_emails():
# check over quota cases and send quota warning emails as needed
hs_internal_zone = "hydroshare"
if not QuotaMessage.objects.exists():
QuotaMessage.objects.create()
qmsg = QuotaMessage.objects.first()
users = User.objects.filter(is_active=True).filter(is_superuser=False).all()
for u in users:
uq = UserQuota.objects.filter(user__username=u.username, zone=hs_internal_zone).first()
if uq:
used_percent = uq.used_percent
if used_percent >= qmsg.soft_limit_percent:
if used_percent >= 100 and used_percent < qmsg.hard_limit_percent:
if uq.remaining_grace_period < 0:
# triggers grace period counting
uq.remaining_grace_period = qmsg.grace_period
elif uq.remaining_grace_period > 0:
# reduce remaining_grace_period by one day
uq.remaining_grace_period -= 1
elif used_percent >= qmsg.hard_limit_percent:
# set grace period to 0 when user quota exceeds hard limit
uq.remaining_grace_period = 0
uq.save()
if u.first_name and u.last_name:
sal_name = '{} {}'.format(u.first_name, u.last_name)
elif u.first_name:
sal_name = u.first_name
elif u.last_name:
sal_name = u.last_name
else:
sal_name = u.username
msg_str = 'Dear ' + sal_name + ':\n\n'
ori_qm = get_quota_message(u)
# make embedded settings.DEFAULT_SUPPORT_EMAIL clickable with subject auto-filled
replace_substr = "<a href='mailto:{0}?subject=Request more quota'>{0}</a>".format(
settings.DEFAULT_SUPPORT_EMAIL)
new_qm = ori_qm.replace(settings.DEFAULT_SUPPORT_EMAIL, replace_substr)
msg_str += new_qm
msg_str += '\n\nHydroShare Support'
subject = 'Quota warning'
try:
# send email for people monitoring and follow-up as needed
send_mail(subject, '', settings.DEFAULT_FROM_EMAIL,
[u.email, settings.DEFAULT_SUPPORT_EMAIL],
html_message=msg_str)
except Exception as ex:
logger.debug("Failed to send quota warning email: " + ex.message)
else:
if uq.remaining_grace_period >= 0:
# turn grace period off now that the user is below quota soft limit
uq.remaining_grace_period = -1
uq.save()
else:
logger.debug('user ' + u.username + ' does not have UserQuota foreign key relation')
@shared_task
def add_zip_file_contents_to_resource(pk, zip_file_path):
"""Add zip file to existing resource and remove tmp zip file."""
zfile = None
resource = None
try:
resource = utils.get_resource_by_shortkey(pk, or_404=False)
zfile = zipfile.ZipFile(zip_file_path)
num_files = len(zfile.infolist())
zcontents = utils.ZipContents(zfile)
files = zcontents.get_files()
resource.file_unpack_status = 'Running'
resource.save()
for i, f in enumerate(files):
logger.debug("Adding file {0} to resource {1}".format(f.name, pk))
utils.add_file_to_resource(resource, f)
resource.file_unpack_message = "Imported {0} of about {1} file(s) ...".format(
i, num_files)
resource.save()
# This might make the resource unsuitable for public consumption
resource.update_public_and_discoverable()
# TODO: this is a bit of a lie because a different user requested the bag overwrite
utils.resource_modified(resource, resource.creator, overwrite_bag=False)
# Call success callback
resource.file_unpack_message = None
resource.file_unpack_status = 'Done'
resource.save()
except BaseResource.DoesNotExist:
msg = "Unable to add zip file contents to non-existent resource {pk}."
msg = msg.format(pk=pk)
logger.error(msg)
except:
exc_info = "".join(traceback.format_exception(*sys.exc_info()))
if resource:
resource.file_unpack_status = 'Error'
resource.file_unpack_message = exc_info
resource.save()
if zfile:
zfile.close()
logger.error(exc_info)
finally:
# Delete upload file
os.unlink(zip_file_path)
@shared_task
def delete_zip(zip_path):
istorage = IrodsStorage()
if istorage.exists(zip_path):
istorage.delete(zip_path)
@shared_task
def create_temp_zip(resource_id, input_path, output_path, sf_aggregation, sf_zip=False):
""" Create temporary zip file from input_path and store in output_path
:param input_path: full irods path of input starting with federation path
:param output_path: full irods path of output starting with federation path
:param sf_aggregation: if True, include logical metadata files
"""
from hs_core.hydroshare.utils import get_resource_by_shortkey
res = get_resource_by_shortkey(resource_id)
istorage = res | logger.info("added " + str(add_count) + " out of " + str(
active_subscribed.count()) + " for list id " + list_id) | conditional_block |
tasks.py | mailchimp for list id " + list_id)
else:
logger.info(
"cleared " + str(delete_count) + " out of " + str(
active_subscribed.count()) + " for list id " + list_id)
if active_subscribed.count() == add_count:
logger.info("successfully synced all subscriptions for list id " + list_id)
else:
logger.info("added " + str(add_count) + " out of " + str(
active_subscribed.count()) + " for list id " + list_id)
@periodic_task(ignore_result=True, run_every=crontab(minute=0, hour=0))
def manage_task_nightly():
# The nightly running task do DOI activation check
# Check DOI activation on failed and pending resources and send email.
msg_lst = []
# retrieve all published resources with failed metadata deposition with CrossRef if any and
# retry metadata deposition
failed_resources = BaseResource.objects.filter(raccess__published=True, doi__contains='failure')
for res in failed_resources:
if res.metadata.dates.all().filter(type='published'):
pub_date = res.metadata.dates.all().filter(type='published')[0]
pub_date = pub_date.start_date.strftime('%m/%d/%Y')
act_doi = get_activated_doi(res.doi)
response = deposit_res_metadata_with_crossref(res)
if response.status_code == status.HTTP_200_OK:
# retry of metadata deposition succeeds, change resource flag from failure
# to pending
res.doi = get_resource_doi(act_doi, 'pending')
res.save()
else:
# retry of metadata deposition failed again, notify admin
msg_lst.append("Metadata deposition with CrossRef for the published resource "
"DOI {res_doi} failed again after retry with first metadata "
"deposition requested since {pub_date}.".format(res_doi=act_doi,
pub_date=pub_date))
logger.debug(response.content)
else:
msg_lst.append("{res_id} does not have published date in its metadata.".format(
res_id=res.short_id))
pending_resources = BaseResource.objects.filter(raccess__published=True,
doi__contains='pending')
for res in pending_resources:
if res.metadata.dates.all().filter(type='published'):
pub_date = res.metadata.dates.all().filter(type='published')[0]
pub_date = pub_date.start_date.strftime('%m/%d/%Y')
act_doi = get_activated_doi(res.doi)
main_url = get_crossref_url()
req_str = '{MAIN_URL}servlet/submissionDownload?usr={USERNAME}&pwd=' \
'{PASSWORD}&doi_batch_id={DOI_BATCH_ID}&type={TYPE}'
response = requests.get(req_str.format(MAIN_URL=main_url,
USERNAME=settings.CROSSREF_LOGIN_ID,
PASSWORD=settings.CROSSREF_LOGIN_PWD,
DOI_BATCH_ID=res.short_id,
TYPE='result'))
root = ElementTree.fromstring(response.content)
rec_cnt_elem = root.find('.//record_count')
failure_cnt_elem = root.find('.//failure_count')
success = False
if rec_cnt_elem is not None and failure_cnt_elem is not None:
rec_cnt = int(rec_cnt_elem.text)
failure_cnt = int(failure_cnt_elem.text)
if rec_cnt > 0 and failure_cnt == 0:
res.doi = act_doi
res.save()
success = True
if not success:
msg_lst.append("Published resource DOI {res_doi} is not yet activated with request "
"data deposited since {pub_date}.".format(res_doi=act_doi,
pub_date=pub_date))
logger.debug(response.content)
else:
msg_lst.append("{res_id} does not have published date in its metadata.".format(
res_id=res.short_id))
if msg_lst:
email_msg = '\n'.join(msg_lst)
subject = 'Notification of pending DOI deposition/activation of published resources'
# send email for people monitoring and follow-up as needed
send_mail(subject, email_msg, settings.DEFAULT_FROM_EMAIL, [settings.DEFAULT_SUPPORT_EMAIL])
@periodic_task(ignore_result=True, run_every=crontab(minute=15, hour=0, day_of_week=1,
day_of_month='1-7'))
def send_over_quota_emails():
# check over quota cases and send quota warning emails as needed
| uq.save()
if u.first_name and u.last_name:
sal_name = '{} {}'.format(u.first_name, u.last_name)
elif u.first_name:
sal_name = u.first_name
elif u.last_name:
sal_name = u.last_name
else:
sal_name = u.username
msg_str = 'Dear ' + sal_name + ':\n\n'
ori_qm = get_quota_message(u)
# make embedded settings.DEFAULT_SUPPORT_EMAIL clickable with subject auto-filled
replace_substr = "<a href='mailto:{0}?subject=Request more quota'>{0}</a>".format(
settings.DEFAULT_SUPPORT_EMAIL)
new_qm = ori_qm.replace(settings.DEFAULT_SUPPORT_EMAIL, replace_substr)
msg_str += new_qm
msg_str += '\n\nHydroShare Support'
subject = 'Quota warning'
try:
# send email for people monitoring and follow-up as needed
send_mail(subject, '', settings.DEFAULT_FROM_EMAIL,
[u.email, settings.DEFAULT_SUPPORT_EMAIL],
html_message=msg_str)
except Exception as ex:
logger.debug("Failed to send quota warning email: " + ex.message)
else:
if uq.remaining_grace_period >= 0:
# turn grace period off now that the user is below quota soft limit
uq.remaining_grace_period = -1
uq.save()
else:
logger.debug('user ' + u.username + ' does not have UserQuota foreign key relation')
@shared_task
def add_zip_file_contents_to_resource(pk, zip_file_path):
"""Add zip file to existing resource and remove tmp zip file."""
zfile = None
resource = None
try:
resource = utils.get_resource_by_shortkey(pk, or_404=False)
zfile = zipfile.ZipFile(zip_file_path)
num_files = len(zfile.infolist())
zcontents = utils.ZipContents(zfile)
files = zcontents.get_files()
resource.file_unpack_status = 'Running'
resource.save()
for i, f in enumerate(files):
logger.debug("Adding file {0} to resource {1}".format(f.name, pk))
utils.add_file_to_resource(resource, f)
resource.file_unpack_message = "Imported {0} of about {1} file(s) ...".format(
i, num_files)
resource.save()
# This might make the resource unsuitable for public consumption
resource.update_public_and_discoverable()
# TODO: this is a bit of a lie because a different user requested the bag overwrite
utils.resource_modified(resource, resource.creator, overwrite_bag=False)
# Call success callback
resource.file_unpack_message = None
resource.file_unpack_status = 'Done'
resource.save()
except BaseResource.DoesNotExist:
msg = "Unable to add zip file contents to non-existent resource {pk}."
msg = msg.format(pk=pk)
logger.error(msg)
except:
exc_info = "".join(traceback.format_exception(*sys.exc_info()))
if resource:
resource.file_unpack_status = 'Error'
resource.file_unpack_message = exc_info
resource.save()
if zfile:
zfile.close()
logger.error(exc_info)
finally:
# Delete upload file
os.unlink(zip_file_path)
@shared_task
def delete_zip(zip_path):
istorage = IrodsStorage()
if istorage.exists(zip_path):
istorage.delete(zip_path)
@shared_task
def create_temp_zip(resource_id, input_path, output_path, sf_aggregation, sf_zip=False):
""" Create temporary zip file from input_path and store in output_path
:param input_path: full irods path of input starting with federation path
:param output_path: full irods path of output starting with federation path
:param sf_aggregation: if True, include logical metadata files
"""
from hs_core.hydroshare.utils import get_resource_by_shortkey
res = get_resource_by_shortkey(resource_id)
istorage = res.get | hs_internal_zone = "hydroshare"
if not QuotaMessage.objects.exists():
QuotaMessage.objects.create()
qmsg = QuotaMessage.objects.first()
users = User.objects.filter(is_active=True).filter(is_superuser=False).all()
for u in users:
uq = UserQuota.objects.filter(user__username=u.username, zone=hs_internal_zone).first()
if uq:
used_percent = uq.used_percent
if used_percent >= qmsg.soft_limit_percent:
if used_percent >= 100 and used_percent < qmsg.hard_limit_percent:
if uq.remaining_grace_period < 0:
# triggers grace period counting
uq.remaining_grace_period = qmsg.grace_period
elif uq.remaining_grace_period > 0:
# reduce remaining_grace_period by one day
uq.remaining_grace_period -= 1
elif used_percent >= qmsg.hard_limit_percent:
# set grace period to 0 when user quota exceeds hard limit
uq.remaining_grace_period = 0
| identifier_body |
tasks.py | mailchimp for list id " + list_id)
else:
logger.info(
"cleared " + str(delete_count) + " out of " + str(
active_subscribed.count()) + " for list id " + list_id)
if active_subscribed.count() == add_count:
logger.info("successfully synced all subscriptions for list id " + list_id)
else:
logger.info("added " + str(add_count) + " out of " + str(
active_subscribed.count()) + " for list id " + list_id)
@periodic_task(ignore_result=True, run_every=crontab(minute=0, hour=0))
def manage_task_nightly():
# The nightly running task do DOI activation check
# Check DOI activation on failed and pending resources and send email.
msg_lst = []
# retrieve all published resources with failed metadata deposition with CrossRef if any and
# retry metadata deposition
failed_resources = BaseResource.objects.filter(raccess__published=True, doi__contains='failure')
for res in failed_resources:
if res.metadata.dates.all().filter(type='published'):
pub_date = res.metadata.dates.all().filter(type='published')[0]
pub_date = pub_date.start_date.strftime('%m/%d/%Y')
act_doi = get_activated_doi(res.doi)
response = deposit_res_metadata_with_crossref(res)
if response.status_code == status.HTTP_200_OK:
# retry of metadata deposition succeeds, change resource flag from failure
# to pending
res.doi = get_resource_doi(act_doi, 'pending')
res.save()
else:
# retry of metadata deposition failed again, notify admin
msg_lst.append("Metadata deposition with CrossRef for the published resource "
"DOI {res_doi} failed again after retry with first metadata "
"deposition requested since {pub_date}.".format(res_doi=act_doi,
pub_date=pub_date))
logger.debug(response.content)
else:
msg_lst.append("{res_id} does not have published date in its metadata.".format(
res_id=res.short_id))
pending_resources = BaseResource.objects.filter(raccess__published=True,
doi__contains='pending')
for res in pending_resources:
if res.metadata.dates.all().filter(type='published'):
pub_date = res.metadata.dates.all().filter(type='published')[0]
pub_date = pub_date.start_date.strftime('%m/%d/%Y')
act_doi = get_activated_doi(res.doi)
main_url = get_crossref_url()
req_str = '{MAIN_URL}servlet/submissionDownload?usr={USERNAME}&pwd=' \
'{PASSWORD}&doi_batch_id={DOI_BATCH_ID}&type={TYPE}'
response = requests.get(req_str.format(MAIN_URL=main_url,
USERNAME=settings.CROSSREF_LOGIN_ID,
PASSWORD=settings.CROSSREF_LOGIN_PWD,
DOI_BATCH_ID=res.short_id,
TYPE='result'))
root = ElementTree.fromstring(response.content)
rec_cnt_elem = root.find('.//record_count')
failure_cnt_elem = root.find('.//failure_count')
success = False
if rec_cnt_elem is not None and failure_cnt_elem is not None:
rec_cnt = int(rec_cnt_elem.text)
failure_cnt = int(failure_cnt_elem.text)
if rec_cnt > 0 and failure_cnt == 0:
res.doi = act_doi
res.save()
success = True
if not success:
msg_lst.append("Published resource DOI {res_doi} is not yet activated with request "
"data deposited since {pub_date}.".format(res_doi=act_doi,
pub_date=pub_date))
logger.debug(response.content)
else:
msg_lst.append("{res_id} does not have published date in its metadata.".format(
res_id=res.short_id))
if msg_lst:
email_msg = '\n'.join(msg_lst)
subject = 'Notification of pending DOI deposition/activation of published resources'
# send email for people monitoring and follow-up as needed
send_mail(subject, email_msg, settings.DEFAULT_FROM_EMAIL, [settings.DEFAULT_SUPPORT_EMAIL])
@periodic_task(ignore_result=True, run_every=crontab(minute=15, hour=0, day_of_week=1,
day_of_month='1-7'))
def send_over_quota_emails():
# check over quota cases and send quota warning emails as needed
hs_internal_zone = "hydroshare"
if not QuotaMessage.objects.exists():
QuotaMessage.objects.create()
qmsg = QuotaMessage.objects.first()
users = User.objects.filter(is_active=True).filter(is_superuser=False).all()
for u in users:
uq = UserQuota.objects.filter(user__username=u.username, zone=hs_internal_zone).first()
if uq:
used_percent = uq.used_percent
if used_percent >= qmsg.soft_limit_percent:
if used_percent >= 100 and used_percent < qmsg.hard_limit_percent:
if uq.remaining_grace_period < 0:
# triggers grace period counting
uq.remaining_grace_period = qmsg.grace_period
elif uq.remaining_grace_period > 0:
# reduce remaining_grace_period by one day
uq.remaining_grace_period -= 1
elif used_percent >= qmsg.hard_limit_percent:
# set grace period to 0 when user quota exceeds hard limit
uq.remaining_grace_period = 0
uq.save()
if u.first_name and u.last_name:
sal_name = '{} {}'.format(u.first_name, u.last_name)
elif u.first_name:
sal_name = u.first_name
elif u.last_name:
sal_name = u.last_name
else:
sal_name = u.username
msg_str = 'Dear ' + sal_name + ':\n\n'
ori_qm = get_quota_message(u)
# make embedded settings.DEFAULT_SUPPORT_EMAIL clickable with subject auto-filled
replace_substr = "<a href='mailto:{0}?subject=Request more quota'>{0}</a>".format(
settings.DEFAULT_SUPPORT_EMAIL)
new_qm = ori_qm.replace(settings.DEFAULT_SUPPORT_EMAIL, replace_substr)
msg_str += new_qm
msg_str += '\n\nHydroShare Support'
subject = 'Quota warning'
try:
# send email for people monitoring and follow-up as needed
send_mail(subject, '', settings.DEFAULT_FROM_EMAIL,
[u.email, settings.DEFAULT_SUPPORT_EMAIL],
html_message=msg_str)
except Exception as ex:
logger.debug("Failed to send quota warning email: " + ex.message)
else:
if uq.remaining_grace_period >= 0:
# turn grace period off now that the user is below quota soft limit
uq.remaining_grace_period = -1
uq.save()
else:
logger.debug('user ' + u.username + ' does not have UserQuota foreign key relation')
@shared_task
def | (pk, zip_file_path):
"""Add zip file to existing resource and remove tmp zip file."""
zfile = None
resource = None
try:
resource = utils.get_resource_by_shortkey(pk, or_404=False)
zfile = zipfile.ZipFile(zip_file_path)
num_files = len(zfile.infolist())
zcontents = utils.ZipContents(zfile)
files = zcontents.get_files()
resource.file_unpack_status = 'Running'
resource.save()
for i, f in enumerate(files):
logger.debug("Adding file {0} to resource {1}".format(f.name, pk))
utils.add_file_to_resource(resource, f)
resource.file_unpack_message = "Imported {0} of about {1} file(s) ...".format(
i, num_files)
resource.save()
# This might make the resource unsuitable for public consumption
resource.update_public_and_discoverable()
# TODO: this is a bit of a lie because a different user requested the bag overwrite
utils.resource_modified(resource, resource.creator, overwrite_bag=False)
# Call success callback
resource.file_unpack_message = None
resource.file_unpack_status = 'Done'
resource.save()
except BaseResource.DoesNotExist:
msg = "Unable to add zip file contents to non-existent resource {pk}."
msg = msg.format(pk=pk)
logger.error(msg)
except:
exc_info = "".join(traceback.format_exception(*sys.exc_info()))
if resource:
resource.file_unpack_status = 'Error'
resource.file_unpack_message = exc_info
resource.save()
if zfile:
zfile.close()
logger.error(exc_info)
finally:
# Delete upload file
os.unlink(zip_file_path)
@shared_task
def delete_zip(zip_path):
istorage = IrodsStorage()
if istorage.exists(zip_path):
istorage.delete(zip_path)
@shared_task
def create_temp_zip(resource_id, input_path, output_path, sf_aggregation, sf_zip=False):
""" Create temporary zip file from input_path and store in output_path
:param input_path: full irods path of input starting with federation path
:param output_path: full irods path of output starting with federation path
:param sf_aggregation: if True, include logical metadata files
"""
from hs_core.hydroshare.utils import get_resource_by_shortkey
res = get_resource_by_shortkey(resource_id)
istorage = res | add_zip_file_contents_to_resource | identifier_name |
tasks.py | mailchimp for list id " + list_id)
else:
logger.info(
"cleared " + str(delete_count) + " out of " + str(
active_subscribed.count()) + " for list id " + list_id)
if active_subscribed.count() == add_count:
logger.info("successfully synced all subscriptions for list id " + list_id)
else:
logger.info("added " + str(add_count) + " out of " + str(
active_subscribed.count()) + " for list id " + list_id)
@periodic_task(ignore_result=True, run_every=crontab(minute=0, hour=0))
def manage_task_nightly():
# The nightly running task do DOI activation check
# Check DOI activation on failed and pending resources and send email.
msg_lst = []
# retrieve all published resources with failed metadata deposition with CrossRef if any and
# retry metadata deposition
failed_resources = BaseResource.objects.filter(raccess__published=True, doi__contains='failure')
for res in failed_resources:
if res.metadata.dates.all().filter(type='published'):
pub_date = res.metadata.dates.all().filter(type='published')[0]
pub_date = pub_date.start_date.strftime('%m/%d/%Y')
act_doi = get_activated_doi(res.doi)
response = deposit_res_metadata_with_crossref(res)
if response.status_code == status.HTTP_200_OK:
# retry of metadata deposition succeeds, change resource flag from failure
# to pending
res.doi = get_resource_doi(act_doi, 'pending')
res.save()
else:
# retry of metadata deposition failed again, notify admin
msg_lst.append("Metadata deposition with CrossRef for the published resource "
"DOI {res_doi} failed again after retry with first metadata "
"deposition requested since {pub_date}.".format(res_doi=act_doi,
pub_date=pub_date))
logger.debug(response.content)
else:
msg_lst.append("{res_id} does not have published date in its metadata.".format(
res_id=res.short_id))
pending_resources = BaseResource.objects.filter(raccess__published=True,
doi__contains='pending')
for res in pending_resources:
if res.metadata.dates.all().filter(type='published'):
pub_date = res.metadata.dates.all().filter(type='published')[0]
pub_date = pub_date.start_date.strftime('%m/%d/%Y')
act_doi = get_activated_doi(res.doi)
main_url = get_crossref_url()
req_str = '{MAIN_URL}servlet/submissionDownload?usr={USERNAME}&pwd=' \
'{PASSWORD}&doi_batch_id={DOI_BATCH_ID}&type={TYPE}'
response = requests.get(req_str.format(MAIN_URL=main_url,
| DOI_BATCH_ID=res.short_id,
TYPE='result'))
root = ElementTree.fromstring(response.content)
rec_cnt_elem = root.find('.//record_count')
failure_cnt_elem = root.find('.//failure_count')
success = False
if rec_cnt_elem is not None and failure_cnt_elem is not None:
rec_cnt = int(rec_cnt_elem.text)
failure_cnt = int(failure_cnt_elem.text)
if rec_cnt > 0 and failure_cnt == 0:
res.doi = act_doi
res.save()
success = True
if not success:
msg_lst.append("Published resource DOI {res_doi} is not yet activated with request "
"data deposited since {pub_date}.".format(res_doi=act_doi,
pub_date=pub_date))
logger.debug(response.content)
else:
msg_lst.append("{res_id} does not have published date in its metadata.".format(
res_id=res.short_id))
if msg_lst:
email_msg = '\n'.join(msg_lst)
subject = 'Notification of pending DOI deposition/activation of published resources'
# send email for people monitoring and follow-up as needed
send_mail(subject, email_msg, settings.DEFAULT_FROM_EMAIL, [settings.DEFAULT_SUPPORT_EMAIL])
@periodic_task(ignore_result=True, run_every=crontab(minute=15, hour=0, day_of_week=1,
day_of_month='1-7'))
def send_over_quota_emails():
# check over quota cases and send quota warning emails as needed
hs_internal_zone = "hydroshare"
if not QuotaMessage.objects.exists():
QuotaMessage.objects.create()
qmsg = QuotaMessage.objects.first()
users = User.objects.filter(is_active=True).filter(is_superuser=False).all()
for u in users:
uq = UserQuota.objects.filter(user__username=u.username, zone=hs_internal_zone).first()
if uq:
used_percent = uq.used_percent
if used_percent >= qmsg.soft_limit_percent:
if used_percent >= 100 and used_percent < qmsg.hard_limit_percent:
if uq.remaining_grace_period < 0:
# triggers grace period counting
uq.remaining_grace_period = qmsg.grace_period
elif uq.remaining_grace_period > 0:
# reduce remaining_grace_period by one day
uq.remaining_grace_period -= 1
elif used_percent >= qmsg.hard_limit_percent:
# set grace period to 0 when user quota exceeds hard limit
uq.remaining_grace_period = 0
uq.save()
if u.first_name and u.last_name:
sal_name = '{} {}'.format(u.first_name, u.last_name)
elif u.first_name:
sal_name = u.first_name
elif u.last_name:
sal_name = u.last_name
else:
sal_name = u.username
msg_str = 'Dear ' + sal_name + ':\n\n'
ori_qm = get_quota_message(u)
# make embedded settings.DEFAULT_SUPPORT_EMAIL clickable with subject auto-filled
replace_substr = "<a href='mailto:{0}?subject=Request more quota'>{0}</a>".format(
settings.DEFAULT_SUPPORT_EMAIL)
new_qm = ori_qm.replace(settings.DEFAULT_SUPPORT_EMAIL, replace_substr)
msg_str += new_qm
msg_str += '\n\nHydroShare Support'
subject = 'Quota warning'
try:
# send email for people monitoring and follow-up as needed
send_mail(subject, '', settings.DEFAULT_FROM_EMAIL,
[u.email, settings.DEFAULT_SUPPORT_EMAIL],
html_message=msg_str)
except Exception as ex:
logger.debug("Failed to send quota warning email: " + ex.message)
else:
if uq.remaining_grace_period >= 0:
# turn grace period off now that the user is below quota soft limit
uq.remaining_grace_period = -1
uq.save()
else:
logger.debug('user ' + u.username + ' does not have UserQuota foreign key relation')
@shared_task
def add_zip_file_contents_to_resource(pk, zip_file_path):
"""Add zip file to existing resource and remove tmp zip file."""
zfile = None
resource = None
try:
resource = utils.get_resource_by_shortkey(pk, or_404=False)
zfile = zipfile.ZipFile(zip_file_path)
num_files = len(zfile.infolist())
zcontents = utils.ZipContents(zfile)
files = zcontents.get_files()
resource.file_unpack_status = 'Running'
resource.save()
for i, f in enumerate(files):
logger.debug("Adding file {0} to resource {1}".format(f.name, pk))
utils.add_file_to_resource(resource, f)
resource.file_unpack_message = "Imported {0} of about {1} file(s) ...".format(
i, num_files)
resource.save()
# This might make the resource unsuitable for public consumption
resource.update_public_and_discoverable()
# TODO: this is a bit of a lie because a different user requested the bag overwrite
utils.resource_modified(resource, resource.creator, overwrite_bag=False)
# Call success callback
resource.file_unpack_message = None
resource.file_unpack_status = 'Done'
resource.save()
except BaseResource.DoesNotExist:
msg = "Unable to add zip file contents to non-existent resource {pk}."
msg = msg.format(pk=pk)
logger.error(msg)
except:
exc_info = "".join(traceback.format_exception(*sys.exc_info()))
if resource:
resource.file_unpack_status = 'Error'
resource.file_unpack_message = exc_info
resource.save()
if zfile:
zfile.close()
logger.error(exc_info)
finally:
# Delete upload file
os.unlink(zip_file_path)
@shared_task
def delete_zip(zip_path):
istorage = IrodsStorage()
if istorage.exists(zip_path):
istorage.delete(zip_path)
@shared_task
def create_temp_zip(resource_id, input_path, output_path, sf_aggregation, sf_zip=False):
""" Create temporary zip file from input_path and store in output_path
:param input_path: full irods path of input starting with federation path
:param output_path: full irods path of output starting with federation path
:param sf_aggregation: if True, include logical metadata files
"""
from hs_core.hydroshare.utils import get_resource_by_shortkey
res = get_resource_by_shortkey(resource_id)
istorage = | USERNAME=settings.CROSSREF_LOGIN_ID,
PASSWORD=settings.CROSSREF_LOGIN_PWD,
| random_line_split |
controllers.js | effects', 30],
],
name: ' ',
//data:[50,40],
dataLabels: {
rotation: 270,
enabled: false,
format: '{series.name}: <b>{series.percentage:.1f}%</b>',
}
}],
title: {
text: 'Medicine Details'
},
tooltip: {
valueDecimals: 2,
valueSuffix: ' USD',
pointFormat: '{series.name}: <b>{series.percentage:.1f}%</b>'
},
credits: {
enabled: false
},
loading: false
}
})
.controller('LoginCntrl', function($scope,$state) {
$scope.dform={};
$scope.ptform={};
$scope.phform={};
$scope.login = function() {
var user=$scope.dform.username;
var dpass=$scope.dform.password;
var duser=user.toLowerCase();
// if(duser.substr(0,6)=="doctor" && dpass=="doctor123")
if(duser=="blanco" && dpass=="blanco123")
$state.go("tab.dash");
else
document.getElementById("loginErr").innerHTML="Invalid Credentials";
};
$scope.ptLogin = function() {
var tuser=$scope.ptform.username;
var ptpass=$scope.ptform.password;
var ptuser=tuser.toLowerCase();
// if(ptuser.substr(0,7)=="patient" && ptpass=="patient123")
if(ptuser=="sheldon" && ptpass=="sheldon123")
$state.go("patient.dash");
else
document.getElementById("ptErr").innerHTML="Invalid Credentials";
};
$scope.phLogin = function() {
var huser=$scope.phform.username;
var phpass=$scope.phform.password;
var phuser=huser.toLowerCase();
if(phuser.substr(0,8)=="pharmacy" && phpass=="pharmacy123")
$state.go("pharmacy.dash");
else
document.getElementById("phErr").innerHTML="Invalid Credentials";
};
})
.controller('ChatDetailCtrl', function($scope, $stateParams, Chats) {
$scope.chat = Chats.get($stateParams.chatId);
})
.controller('diseasectrl', function($scope, $stateParams,$ionicHistory,Diseases,$ionicPopup) {
/*$scope.goBack = function() {
$ionicHistory.goBack();
};*/
$scope.diseases = Diseases.all();
$scope.remove = function(disease) {
Diseases.remove(disease); }
$scope.showAlert = function(m) {
var alertPopup = $ionicPopup.alert({
title: m,
templateUrl: 'templates/templates/diseasesmed.html'
});
};
})
.controller('diseasectrlPh', function($scope, $stateParams,$ionicHistory,Diseases,$ionicPopup) {
/*$scope.goBack = function() {
$ionicHistory.goBack();
};*/
$scope.diseases = Diseases.all();
$scope.remove = function(disease) {
Diseases.remove(disease); }
$scope.showAlert = function(m) {
var alertPopup = $ionicPopup.alert({
title: m,
templateUrl: 'templates/templatesPh/diseasesmed.html'
});
};
})
.controller('PatientsCtrl', function($scope, Patients,$ionicHistory,$ionicPopup) {
$scope.patients = Patients.all();
$scope.remove = function(patient) {
Patients.remove(patient);
};
$scope.showAlert = function(nam) {
var alertPopup = $ionicPopup.alert({
title: nam,
templateUrl: 'templates/templatesPh/patientmed.html'
});
};
})
.controller('searchcontroller', function($ionicHistory,$scope,$http,$ionicLoading,$timeout,medicines) {
$scope.Result;
$scope.flag=false;
$scope.getMed = function (k) {
$scope.med=medicines.get(k);
}
$scope.getValue = function (key) {
$scope.flag=false;
if($scope.search.length>1){
var id =$scope.search;
//$scope.loading = $ionicLoading.show({content: 'Loading...'});
$scope.mmed=medicines.all();
//alert(JSON.stringify($scope.mmed))
if($scope.mmed.length==0){
$scope.flag=true;
$scope.Result="There are no products matching the selection.";
}
$scope.result=$scope.mmed;
}
}
})
.controller('medctrl', function($scope, $stateParams,medicines,$ionicPopup) {
$scope.med=medicines.all();
$scope.medAlert = function(t) {
var alertPopup = $ionicPopup.alert({
title: t,
templateUrl: 'templates/templates/patientmed.html'
});
};
})
.controller('medctrlPh', function($scope, $stateParams,medicines,$ionicPopup) {
$scope.med=medicines.all();
$scope.medAlert = function(t) {
var alertPopup = $ionicPopup.alert({
title: t,
templateUrl: 'templates/templatesPh/pharmacymed.html'
});
};
})
.controller('DoctCtrl', function($scope, Docts,$ionicHistory,$ionicPopup) {
$scope.patients = Docts.all();
$scope.remove = function(patient) {
Docts.remove(patient);
};
$scope.showAlert = function(nam) {
var alertPopup = $ionicPopup.alert({
title: nam,
templateUrl: 'templates/templatesPh/patientmed1.html'
});
};
})
/*
.controller('MedCtrl', function($scope, medicines,$ionicHistory,$ionicPopup) {
$scope.meds = medicines.all();
$scope.showAlert = function() {
var alertPopup = $ionicPopup.alert({
title: 'Acne',
templateUrl: 'templates/templatesPh/patientmed.html'
});
};
})*/
.controller('PatientDetailCtrl', function($scope, $stateParams, Patients) {
$scope.patient = Patients.get($stateParams.patientsId);
})
.controller('AppointDetailCtrl', function($scope, $stateParams, Patients) {
$scope.patient = Patients.get($stateParams.appointId);
})
.controller('Action', function($scope, $ionicActionSheet,$location, $state,ContactService,Patients) {
/* $scope.showActionsheetDoc = function() {
$ionicActionSheet.show({
buttons: [
{ text: '<div class="padding" >Add Patient</div>' }
],
cancelText: 'Cancel',
cancel: function() {
},
buttonClicked: function(index) {
if(index === 0){ // Allgemein
$state.go('tab.addPrescription');
}
return true;
},
destructiveButtonClicked: function() {
return true;
}
});
};
*/
$scope.showActionsheet = function() {
$ionicActionSheet.show({
buttons: [
{ text: '<div class="padding" >Add Prescription</div>' }
],
cancelText: 'Cancel',
cancel: function() {
},
buttonClicked: function(index) {
if(index === 0){ // general
$state.go('pharmacy.addPrescription');
}
return true;
},
destructiveButtonClicked: function() {
return true;
}
});
};
$scope.cancelAddress=function(){
$state.go('pharmacy.patients');
};
//uid=3;
$scope.contacts = ContactService.list();
//alert(JSON.stringify($scope.contacts));
$scope.newcontact = {};
$scope.saveAddress=function(){
//alert($scope.newcontact.fname)
ContactService.save($scope.newcontact);
$state.go("pharmacy.patientaddress");
};
$scope.cancel=function(){
$state.go("pharmacy.addPrescription");
}; | $scope.add={};
alert($scope.add.fname);
var id1=$scope.p[$scope.p.length-1].id+1;
var savep={
id: id1,
face: 'img/mike.png',
name: $scope.add.fname,
PatientID: '12556'+id1,
Age : $scope.add.fname,
message : 'Hi doctor',
date:'26/3/2017 3.20PM',
gender:'Male'
};
// Patients.save(savep);
$state.go("pharmacy.patients");
};
})
.controller('AddressCtrl', function($scope, $stateParams) {
//alert(JSON.stringify($stateParams.addr)+" sss");
//$scope.fname=$stateParams.addr;
$scope.addr=$stateParams.addr;
alert($scope.addr);
})
.controller('MedicinesCtrl', function($scope,$state, $ionicPopup) {
$scope.showAlert = function() {
//alert()
/*var alertPopup = $ionicPopup.alert({
title: 'Ibuprofen',
templateUrl: 'templates/medicinedetail.html'
}); */
$state.go("tab.medicineView");
};
})
.controller('mainctrl', function($scope,$state,Questions,PQuestions,PHQuestions,$location,$ionicHistory) {
$scope.goBack = function() {
$ionicHistory.goBack();
};
$scope.questions = Questions.all();
$scope.pquestions = PQuestions.all();
$scope.phquestions = PHQuestions.all();
//alert(JSON.stringify($scope.contacts))
/* $scope.qashow="true";
$scope.quesshow="true";*/
$scope.clearSearch = function |
$scope.done=function(){
$scope.p=Patients.all(); | random_line_split |
controllers.js | $scope.phLogin = function() {
var huser=$scope.phform.username;
var phpass=$scope.phform.password;
var phuser=huser.toLowerCase();
if(phuser.substr(0,8)=="pharmacy" && phpass=="pharmacy123")
$state.go("pharmacy.dash");
else
document.getElementById("phErr").innerHTML="Invalid Credentials";
};
})
.controller('ChatDetailCtrl', function($scope, $stateParams, Chats) {
$scope.chat = Chats.get($stateParams.chatId);
})
.controller('diseasectrl', function($scope, $stateParams,$ionicHistory,Diseases,$ionicPopup) {
/*$scope.goBack = function() {
$ionicHistory.goBack();
};*/
$scope.diseases = Diseases.all();
$scope.remove = function(disease) {
Diseases.remove(disease); }
$scope.showAlert = function(m) {
var alertPopup = $ionicPopup.alert({
title: m,
templateUrl: 'templates/templates/diseasesmed.html'
});
};
})
.controller('diseasectrlPh', function($scope, $stateParams,$ionicHistory,Diseases,$ionicPopup) {
/*$scope.goBack = function() {
$ionicHistory.goBack();
};*/
$scope.diseases = Diseases.all();
$scope.remove = function(disease) {
Diseases.remove(disease); }
$scope.showAlert = function(m) {
var alertPopup = $ionicPopup.alert({
title: m,
templateUrl: 'templates/templatesPh/diseasesmed.html'
});
};
})
.controller('PatientsCtrl', function($scope, Patients,$ionicHistory,$ionicPopup) {
$scope.patients = Patients.all();
$scope.remove = function(patient) {
Patients.remove(patient);
};
$scope.showAlert = function(nam) {
var alertPopup = $ionicPopup.alert({
title: nam,
templateUrl: 'templates/templatesPh/patientmed.html'
});
};
})
.controller('searchcontroller', function($ionicHistory,$scope,$http,$ionicLoading,$timeout,medicines) {
$scope.Result;
$scope.flag=false;
$scope.getMed = function (k) {
$scope.med=medicines.get(k);
}
$scope.getValue = function (key) {
$scope.flag=false;
if($scope.search.length>1){
var id =$scope.search;
//$scope.loading = $ionicLoading.show({content: 'Loading...'});
$scope.mmed=medicines.all();
//alert(JSON.stringify($scope.mmed))
if($scope.mmed.length==0){
$scope.flag=true;
$scope.Result="There are no products matching the selection.";
}
$scope.result=$scope.mmed;
}
}
})
.controller('medctrl', function($scope, $stateParams,medicines,$ionicPopup) {
$scope.med=medicines.all();
$scope.medAlert = function(t) {
var alertPopup = $ionicPopup.alert({
title: t,
templateUrl: 'templates/templates/patientmed.html'
});
};
})
.controller('medctrlPh', function($scope, $stateParams,medicines,$ionicPopup) {
$scope.med=medicines.all();
$scope.medAlert = function(t) {
var alertPopup = $ionicPopup.alert({
title: t,
templateUrl: 'templates/templatesPh/pharmacymed.html'
});
};
})
.controller('DoctCtrl', function($scope, Docts,$ionicHistory,$ionicPopup) {
$scope.patients = Docts.all();
$scope.remove = function(patient) {
Docts.remove(patient);
};
$scope.showAlert = function(nam) {
var alertPopup = $ionicPopup.alert({
title: nam,
templateUrl: 'templates/templatesPh/patientmed1.html'
});
};
})
/*
.controller('MedCtrl', function($scope, medicines,$ionicHistory,$ionicPopup) {
$scope.meds = medicines.all();
$scope.showAlert = function() {
var alertPopup = $ionicPopup.alert({
title: 'Acne',
templateUrl: 'templates/templatesPh/patientmed.html'
});
};
})*/
.controller('PatientDetailCtrl', function($scope, $stateParams, Patients) {
$scope.patient = Patients.get($stateParams.patientsId);
})
.controller('AppointDetailCtrl', function($scope, $stateParams, Patients) {
$scope.patient = Patients.get($stateParams.appointId);
})
.controller('Action', function($scope, $ionicActionSheet,$location, $state,ContactService,Patients) {
/* $scope.showActionsheetDoc = function() {
$ionicActionSheet.show({
buttons: [
{ text: '<div class="padding" >Add Patient</div>' }
],
cancelText: 'Cancel',
cancel: function() {
},
buttonClicked: function(index) {
if(index === 0){ // Allgemein
$state.go('tab.addPrescription');
}
return true;
},
destructiveButtonClicked: function() {
return true;
}
});
};
*/
$scope.showActionsheet = function() {
$ionicActionSheet.show({
buttons: [
{ text: '<div class="padding" >Add Prescription</div>' }
],
cancelText: 'Cancel',
cancel: function() {
},
buttonClicked: function(index) {
if(index === 0){ // general
$state.go('pharmacy.addPrescription');
}
return true;
},
destructiveButtonClicked: function() {
return true;
}
});
};
$scope.cancelAddress=function(){
$state.go('pharmacy.patients');
};
//uid=3;
$scope.contacts = ContactService.list();
//alert(JSON.stringify($scope.contacts));
$scope.newcontact = {};
$scope.saveAddress=function(){
//alert($scope.newcontact.fname)
ContactService.save($scope.newcontact);
$state.go("pharmacy.patientaddress");
};
$scope.cancel=function(){
$state.go("pharmacy.addPrescription");
};
$scope.done=function(){
$scope.p=Patients.all();
$scope.add={};
alert($scope.add.fname);
var id1=$scope.p[$scope.p.length-1].id+1;
var savep={
id: id1,
face: 'img/mike.png',
name: $scope.add.fname,
PatientID: '12556'+id1,
Age : $scope.add.fname,
message : 'Hi doctor',
date:'26/3/2017 3.20PM',
gender:'Male'
};
// Patients.save(savep);
$state.go("pharmacy.patients");
};
})
.controller('AddressCtrl', function($scope, $stateParams) {
//alert(JSON.stringify($stateParams.addr)+" sss");
//$scope.fname=$stateParams.addr;
$scope.addr=$stateParams.addr;
alert($scope.addr);
})
.controller('MedicinesCtrl', function($scope,$state, $ionicPopup) {
$scope.showAlert = function() {
//alert()
/*var alertPopup = $ionicPopup.alert({
title: 'Ibuprofen',
templateUrl: 'templates/medicinedetail.html'
}); */
$state.go("tab.medicineView");
};
})
.controller('mainctrl', function($scope,$state,Questions,PQuestions,PHQuestions,$location,$ionicHistory) {
$scope.goBack = function() {
$ionicHistory.goBack();
};
$scope.questions = Questions.all();
$scope.pquestions = PQuestions.all();
$scope.phquestions = PHQuestions.all();
//alert(JSON.stringify($scope.contacts))
/* $scope.qashow="true";
$scope.quesshow="true";*/
$scope.clearSearch = function() {
$scope.search = '';
};
})
.controller('maindetailctrl', function($scope, $stateParams,$ionicHistory, Questions,PQuestions,PHQuestions) {
$scope.question = Questions.get($stateParams.questionId);
$scope.pquestion = PQuestions.get($stateParams.questionId);
$scope.phquestion = PHQuestions.get($stateParams.questionId);
$scope.goBack = function() {
$ionicHistory.goBack();
};
})
.controller('AccountCtrl', function($scope,$ionicHistory,$state) {
$scope.settings = {
enableFriends: true
};
$scope.logout=function(){
$ionicHistory.clearCache();
$ionicHistory.clearHistory();
$ionicHistory.nextViewOptions({ disableBack: true, historyRoot: true });
$state.go("loginHome");
}
})
.service('ContactService', function () {
//to create unique contact id
var uid = 15567;
//contacts array to hold list of all contacts
var contacts = [];
//save method create a new contact if not already exists
//else update the existing object
this.save = function (contact) {
if (contact.id == null) {
//if this is new contact, add it in contacts array
contact.id = uid++;
contacts.push(contact);
} else {
//for existing contact, find this contact using id
//and update it.
for (i in contacts) {
if (contacts[i].id == contact.id) {
contacts[i] = contact;
}
}
}
}
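//In effect save() is an "upsert": a contact without an id is treated as new and pushed
//with a generated uid, otherwise the existing entry with the same id is replaced in place.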
//simply search contacts list for given id
//and returns the contact object if found
this.get = function (id) {
for (i in contacts) {
if (contacts[i].id == id) | {
return contacts[i];
} | conditional_block |
|
mamikon.js | />>>>>>>>>>>>>>>>>>>>>>*/
/* filter >>>>>>>>>>>>>>>>>>>>>>*/
var closestsElementClass = function (elem, className) {
var node = elem;
while (node) {
if (node.classList.contains(className)) {
return node; // the class was found, so return this node and stop
}
node = node.parentElement;
}
return null;
}
var catalog = document.querySelector('.portfolio-content');
//block with the filter tabs
var catalogNav = document.querySelector('.portfolio-filter');
var catalogItems = document.querySelectorAll('.portfolio-content__item');
//clear the container so that filtering adds the new items into an empty block
function removeChildren(item) {
while (item.firstChild) {
item.removeChild(item.firstChild)
}
}
//refresh the items in the catalog | item is the catalog container
function updateChildren(item, children) {
removeChildren(item);
for (var i = 0; i < children.length; i++) {
item.appendChild(children[i]);
}
}
catalogNav.addEventListener('click', function (e) {
var target = e.target;
var item = closestsElementClass(target, 'portfolio-filter__link');
if (item === null || item.classList.contains('is-active')) {
return;
}
loadContent();
e.preventDefault();
//read the value of the data-filter="" attribute
var filterValue = item.getAttribute('data-filter');
var previousActiveBtn = document.querySelector('.portfolio-filter__link.is-active');
previousActiveBtn.classList.remove('is-active');
item.classList.add('is-active');
//if "all" is selected, simply show every item
if (filterValue === 'all') {
updateChildren(catalog, catalogItems);
return;
}
//collect the filtered items into an array
var filteredItems = [];
for (var i = 0; i < catalogItems.length; i++) {
var currentItem = catalogItems[i];
if (currentItem.getAttribute('data-category') === filterValue) {
filteredItems.push(currentItem);
}
}
updateChildren(catalog, filteredItems);
});
/* filter />>>>>>>>>>>>>>>>>>>>>>*/
/* smooth scrolling to anchors />>>>>>>>>>>>>>>>>>>>>>*/
var smoothScroll = function (targetEl, duration) {
var headerElHeight = document.querySelector('#header').clientHeight; // header selector
var target = document.querySelector(targetEl);
var targetPosition = target.getBoundingClientRect().top; //- headerElHeight; //subtract the header height if it is fixed
var startPosition = window.pageYOffset;
var startTime = null;
var ease = function (t, b, c, d) {
t /= d / 2;
if (t < 1) return c / 2 * t * t + b;
t--;
return -c / 2 * (t * (t - 2) - 1) + b;
};
var animation = function (currentTime) {
if (startTime === null) startTime = currentTime;
var timeElapsed = currentTime - startTime;
var run = ease(timeElapsed, startPosition, targetPosition, duration);
window.scrollTo(0, run);
if (timeElapsed < duration) requestAnimationFrame(animation);
};
requestAnimationFrame(animation);
};
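//Note: ease() above is the standard easeInOutQuad curve (t = elapsed time, b = start offset,
//c = distance to travel, d = duration), so the scroll speeds up during the first half of the
//animation and slows down during the second half.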
var scrollTo = function () {
var headerNav = document.querySelector('.navigation');
//var links = document.querySelectorAll('.js-scroll'); //class added to the links
var links = document.querySelectorAll('.nav-link'); //class added to the links
links.forEach(each => {
each.addEventListener('click', function () {
var currentTarget = this.getAttribute('href');
smoothScroll(currentTarget, 1000);
//close the mobile menu
headerNav.classList.remove('active');
document.querySelector("#burger").classList.remove('hamburger_active');
//body.classList.toggle('overflow')
if (body.classList.contains('overflow'))
body.classList.remove('overflow')
});
});
};
scrollTo();
/* smooth scrolling to anchors />>>>>>>>>>>>>>>>>>>>>>*/
/* mobile menu >>>>>>>>>>>>>>>>>>>>>>*/
var burger = document.querySelector("#burger");
var navigation = document.querySelector(".navigation");
var contactBtn = document.querySelector(".hire--btn");
burger.addEventListener('click', function () {
body.classList.toggle('overflow')
navigation.classList.toggle('active');
burger.classList.toggle('hamburger_active');
})
contactBtn.addEventListener('click', function () {
navigation.classList.toggle('active');
burger.classList.toggle('hamburger_active');
//body.classList.toggle('overflow')
if (body.classList.contains('overflow'))
body.classList.remove('overflow')
})
/* mobile menu />>>>>>>>>>>>>>>>>>>>>>*/
//filter >>>>>>>>>>>>>>>>>>>>>>*/
var filter = document.querySelector("#filter");
var filterBlock = document.querySelector(".portfolio-filter");
filter.addEventListener('click', function () {
filterBlock.classList.toggle('open');
})
//filter />>>>>>>>>>>>>>>>>>>>>>*/
//popup >>>>>>>>>>>>>>>>>>>>>>*/
//helper that finds an attribute, walking up through nested tags
var closestsElementAttr = function (elem, attr) {
var node = elem;
/*/ if the click landed on a child element, return the parent's
* attribute, hopping up node by node in the loop
/*/
while (node) {
var attribute = node.getAttribute(attr);
if (attribute) {
return attribute; //the attribute exists — return it and stop the function
}
/*/ if the attribute is empty, take the parent of the current element instead,
* and keep looping until some ancestor has the attribute,
* otherwise return null
/*/
node = node.parentElement;
}
//return null if neither the element nor any of its ancestors carry the attribute
return null;
}
//find the closest element by class name
var closestsElementClass = function (elem, className) {
var node = elem;
/*/ if the click landed on a child element, return the parent
* that has the class, hopping up node by node in the loop
/*/
while (node) {
/*/ if the current element contains the class that was passed
* into the function, simply return this element
/*/
if (node.classList.contains(className)) {
return node; //the class was found — return this node and stop the function
}
/*/ if the class is missing, take the parent of the current element instead,
* and keep looping until some ancestor has the class
* we passed in, otherwise return null
/*/
node = node.parentElement;
}
//return null if neither the element nor any of its ancestors carry the class
return null;
}
//show a popup
function showPopup(target) {
target.classList.add('is-active');
}
//hide a popup
function closePopup(target) {
target.classList.remove('is-active');
}
//toggle overflow:hidden on BODY so the background does not scroll while a popup is open
function bodyOverflow() {
body.classList.toggle('overflow');
}
//open a popup when the burger menu is clicked
body.addEventListener('click', function (e) {
var target = e.target;
//look up the data-popup name assigned to the burger button
//var popupClass = target.getAttribute('data-popup');
var popupClass = closestsElementAttr(target, 'data-popup');
//if the clicked element has no data-popup attribute, bail out
if (popupClass === null) {
return;
}
e.preventDefault();
var popup = document.querySelector('.' + popupClass);
if (popup) {
showPopup(popup);
bodyOverflow();
}
})
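//Event delegation: one click listener on <body> covers every trigger element, because
//closestsElementAttr() walks up from the clicked node until it finds a data-popup attribute.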
//close a popup when the X button or the area outside the popup is clicked
body.addEventListener('click', function (e) {
var target = e.target;
//if the click hit the X button or the backdrop outside the popup, close it
if (e.target.classList.contains('popup__close') || e.target.classList.contains('popup__wrapper') || e.target.classList.contains('popup__inner')) {
//find the X button that belongs to this particular popup
var pop | up = clos | identifier_name |
|
mamikon.js | Arrow.addEventListener('click', function () {
changeSlideLeft()
})
/* Slider />>>>>>>>>>>>>>>>>>>*/
/* carousel >>>>>>>>>>>>>>>>>>>*/
var root = document.documentElement;
var carouselElementsDisplayed = getComputedStyle(root).getPropertyValue("--carousel-elements-displayed");
var carouselContent = document.querySelector("ul.carousel-content");
root.style.setProperty("--carousel-elements", carouselContent.children.length);
for (var i = 0; i < carouselElementsDisplayed; i++) {
carouselContent.appendChild(carouselContent.children[i].cloneNode(true));
}
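//Cloning the first --carousel-elements-displayed items onto the end of the list lets the
//CSS animation loop seamlessly: once one full set has scrolled past, the strip can restart
//from the beginning without a visible gap (a common infinite-carousel technique).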
/* carousel />>>>>>>>>>>>>>>>>>>*/
//load-more >>>>>>>>>>>>>>>>>>>>>>*/
var loadMore = document.querySelector("#load-more");
var hiddenWorks = document.querySelectorAll(".portfolio-hidden");
var catalogBlock = document.querySelector('.portfolio-content');
loadMore.addEventListener('click', loadContent);
function loadContent() {
loadMore.style.display = 'none';
for (var i = 0; i < hiddenWorks.length; i++) {
catalogBlock.appendChild(hiddenWorks[i]);
}
}
//load-more />>>>>>>>>>>>>>>>>>>>>>*/
/* filter >>>>>>>>>>>>>>>>>>>>>>*/
var closestsElementClass = function (elem, className) {
var node = elem;
while (node) {
if (node.classList.contains(className)) {
return node; //класс есть — значит его и возвращаем, прекращая функцию
}
node = node.parentElement;
}
return null;
}
var catalog = document.querySelector('.portfolio-content');
//блок с табами
var catalogNav = document.querySelector('.portfolio-filter');
var catalogItems = document.querySelectorAll('.portfolio-content__item');
//Очистка блока с элементами, чтобы при фильрации добавлялись новые в чиситый блок
function removeChildren(item) {
while (item.firstChild) {
item.removeChild(item.firstChild)
}
}
//обновляем элементы в каталоге | item это блок каталога
function updateChildren(item, children) {
removeChildren(item);
for (var i = 0; i < children.length; i++) {
item.appendChild(children[i]);
}
}
catalogNav.addEventListener('click', function (e) {
var target = e.target;
var item = closestsElementClass(target, 'portfolio-filter__link');
if (item === null || item.classList.contains('is-active')) {
return;
}
loadContent();
e.preventDefault();
//Получаем значение из атрибута data-filter=""
var filterValue = item.getAttribute('data-filter');
var previousActiveBtn = document.querySelector('.portfolio-filter__link.is-active');
previousActiveBtn.classList.remove('is-active');
item.classList.add('is-active');
//Если выбраны ВСЕ, то просто их всех выводим
if (filterValue === 'all') {
updateChildren(catalog, catalogItems);
return;
}
//Отфильтрованные элементы перемещаем в массив
var filteredItems = [];
for (var i = 0; i < catalogItems.length; i++) {
var currentItem = catalogItems[i];
if (currentItem.getAttribute('data-category') === filterValue) {
filteredItems.push(currentItem);
}
}
updateChildren(catalog, filteredItems);
});
/* filter />>>>>>>>>>>>>>>>>>>>>>*/
/* переход по якорям />>>>>>>>>>>>>>>>>>>>>>*/
var smoothScroll = function (targetEl, duration) {
var headerElHeight = document.querySelector('#header').clientHeight; // класс хедера
var target = document.querySelector(targetEl);
var targetPosition = target.getBoundingClientRect().top; //- headerElHeight; //вычитаем размер хедера, если он фиксированный
var startPosition = window.pageYOffset;
var startTime = null;
var ease = function (t, b, c, d) {
t /= d / 2;
if (t < 1) return c / 2 * t * t + b;
t--;
return -c / 2 * (t * (t - 2) - 1) + b;
};
var animation = function (currentTime) {
if (startTime === null) startTime = currentTime;
var timeElapsed = currentTime - startTime;
var run = ease(timeElapsed, startPosition, targetPosition, duration);
window.scrollTo(0, run);
if (timeElapsed < duration) requestAnimationFrame(animation);
};
requestAnimationFrame(animation);
};
var scrollTo = function () {
var headerNav = document.querySelector('.navigation');
//var links = document.querySelectorAll('.js-scroll'); //добавляем классы к линкам
var links = document.querySelectorAll('.nav-link'); //добавляем классы к линкам
links.forEach(each => {
each.addEventListener('click', function () {
var currentTarget = this.getAttribute('href');
smoothScroll(currentTarget, 1000);
//выход из мобильного меню
headerNav.classList.remove('active');
document.querySelector("#burger").classList.remove('hamburger_active');
//body.classList.toggle('overflow')
if (body.classList.contains('overflow'))
body.classList.remove('overflow')
});
});
};
scrollTo();
/* переход по якорям />>>>>>>>>>>>>>>>>>>>>>*/
/* мобильное меню >>>>>>>>>>>>>>>>>>>>>>*/
var burger = document.querySelector("#burger");
var navigation = document.querySelector(".navigation");
var contactBtn = document.querySelector(".hire--btn");
burger.addEventListener('click', function () {
body.classList.toggle('overflow')
navigation.classList.toggle('active');
burger.classList.toggle('hamburger_active');
})
contactBtn.addEventListener('click', function () {
navigation.classList.toggle('active');
burger.classList.toggle('hamburger_active');
//body.classList.toggle('overflow')
if (body.classList.contains('overflow'))
body.classList.remove('overflow')
})
/* мобильное меню />>>>>>>>>>>>>>>>>>>>>>*/
//filter >>>>>>>>>>>>>>>>>>>>>>*/
var filter = document.querySelector("#filter");
var filterBlock = document.querySelector(".portfolio-filter");
filter.addEventListener('click', function () {
filterBlock.classList.toggle('open');
})
//filter />>>>>>>>>>>>>>>>>>>>>>*/
//popup >>>>>>>>>>>>>>>>>>>>>>*/
//Функция для поиска атрибута по вложенным тегам
var closestsElementAttr = function (elem, attr) {
var node = elem;
/*/ если клик по дочернему элементу, то возвращаем
* атрибут родителя, перескакивая вверх через ноду по циклу
/*/
while (node) {
var attribute = node.getAttribute(attr);
if (attribute) {
return attribute; //атрибут есть — значит его и возвращаем, прекращая функцию
}
/*/ если атрибут пуст, то вместо текущего елемента берется его родительский
* и так по циклу до тех пор, пока у конечного родителя не найдется атрибут,
* иначе return null
/*/
node = node.parentElement;
}
//возврат null если нет нашего атрибута ни у элемента, ни у его дочерних узлов
return null;
}
//Поиск ближайшего элемента по классу
var closestsElementClass = function (elem, className) {
var node = elem;
/*/ если клик по дочернему элементу, то возвращаем
* класс родителя, перескакивая вверх через ноду по циклу
/*/
while (node) {
/*/ если текущий элемент содержит тот класс, который мы ему передали,
* при вызове функции, то просто возвращаем этот элемент,
/*/
if (node.classList.contains(className)) {
return node; //класс есть — значит его и возвращаем, прекращая функцию
}
/*/ если класса нет, то вместо текущего елемента берется его родительский
* и так по циклу до тех пор, пока у конечного родителя не найдется класс,
* который мы передали, иначе return null
/*/
node = node.parentElement;
}
//возврат null если нет нашего класса ни у элемента, ни у его дочерних узлов
return null;
}
//Показ попапа
function showPopup(target) {
target.classList.add('is-active');
}
//Скрытие попапа
function closePopup(target) {
target.classList.remove('is-active');
}
//BODY overflow hidden, чтобы при открытом попапе фон не скролился
function bodyOverflow() {
body.classList.toggle('overflow');
}
//Открытие попапа при клике на бургер меню
body.addEventListener('click', function (e) { | //Поиск названия data-popup, который задан у кнопки бургера
//var popupClass = target.getAttribute('data-popup');
var popupClass | var target = e.target; | random_line_split |
mamikon.js | -displayed");
var carouselContent = document.querySelector("ul.carousel-content");
root.style.setProperty("--carousel-elements", carouselContent.children.length);
for (var i = 0; i < carouselElementsDisplayed; i++) {
carouselContent.appendChild(carouselContent.children[i].cloneNode(true));
}
/* carousel />>>>>>>>>>>>>>>>>>>*/
//load-more >>>>>>>>>>>>>>>>>>>>>>*/
var loadMore = document.querySelector("#load-more");
var hiddenWorks = document.querySelectorAll(".portfolio-hidden");
var catalogBlock = document.querySelector('.portfolio-content');
loadMore.addEventListener('click', loadContent);
function loadContent() {
loadMore.style.display = 'none';
for (var i = 0; i < hiddenWorks.length; i++) {
catalogBlock.appendChild(hiddenWorks[i]);
}
}
//load-more />>>>>>>>>>>>>>>>>>>>>>*/
/* filter >>>>>>>>>>>>>>>>>>>>>>*/
var closestsElementClass = function (elem, className) {
var node = elem;
while (node) {
if (node.classList.contains(className)) {
return node; //класс есть — значит его и возвращаем, прекращая функцию
}
node = node.parentElement;
}
return null;
}
var catalog = document.querySelector('.portfolio-content');
//блок с табами
var catalogNav = document.querySelector('.portfolio-filter');
var catalogItems = document.querySelectorAll('.portfolio-content__item');
//Очистка блока с элементами, чтобы при фильрации добавлялись новые в чиситый блок
function removeChildren(item) {
while (item.firstChild) {
item.removeChild(item.firstChild)
}
}
//обновляем элементы в каталоге | item это блок каталога
function updateChildren(item, children) {
removeChildren(item);
for (var i = 0; i < children.length; i++) {
item.appendChild(children[i]);
}
}
catalogNav.addEventListener('click', function (e) {
var target = e.target;
var item = closestsElementClass(target, 'portfolio-filter__link');
if (item === null || item.classList.contains('is-active')) {
return;
}
loadContent();
e.preventDefault();
//Получаем значение из атрибута data-filter=""
var filterValue = item.getAttribute('data-filter');
var previousActiveBtn = document.querySelector('.portfolio-filter__link.is-active');
previousActiveBtn.classList.remove('is-active');
item.classList.add('is-active');
//Если выбраны ВСЕ, то просто их всех выводим
if (filterValue === 'all') {
updateChildren(catalog, catalogItems);
return;
}
//Отфильтрованные элементы перемещаем в массив
var filteredItems = [];
for (var i = 0; i < catalogItems.length; i++) {
var currentItem = catalogItems[i];
if (currentItem.getAttribute('data-category') === filterValue) {
filteredItems.push(currentItem);
}
}
updateChildren(catalog, filteredItems);
});
/* filter />>>>>>>>>>>>>>>>>>>>>>*/
/* переход по якорям />>>>>>>>>>>>>>>>>>>>>>*/
var smoothScroll = function (targetEl, duration) {
var headerElHeight = document.querySelector('#header').clientHeight; // класс хедера
var target = document.querySelector(targetEl);
var targetPosition = target.getBoundingClientRect().top; //- headerElHeight; //вычитаем размер хедера, если он фиксированный
var startPosition = window.pageYOffset;
var startTime = null;
var ease = function (t, b, c, d) {
t /= d / 2;
if (t < 1) return c / 2 * t * t + b;
t--;
return -c / 2 * (t * (t - 2) - 1) + b;
};
var animation = function (currentTime) {
if (startTime === null) startTime = currentTime;
var timeElapsed = currentTime - startTime;
var run = ease(timeElapsed, startPosition, targetPosition, duration);
window.scrollTo(0, run);
if (timeElapsed < duration) requestAnimationFrame(animation);
};
requestAnimationFrame(animation);
};
var scrollTo = function () {
var headerNav = document.querySelector('.navigation');
//var links = document.querySelectorAll('.js-scroll'); //добавляем классы к линкам
var links = document.querySelectorAll('.nav-link'); //добавляем классы к линкам
links.forEach(each => {
each.addEventListener('click', function () {
var currentTarget = this.getAttribute('href');
smoothScroll(currentTarget, 1000);
//выход из мобильного меню
headerNav.classList.remove('active');
document.querySelector("#burger").classList.remove('hamburger_active');
//body.classList.toggle('overflow')
if (body.classList.contains('overflow'))
body.classList.remove('overflow')
});
});
};
scrollTo();
/* переход по якорям />>>>>>>>>>>>>>>>>>>>>>*/
/* мобильное меню >>>>>>>>>>>>>>>>>>>>>>*/
var burger = document.querySelector("#burger");
var navigation = document.querySelector(".navigation");
var contactBtn = document.querySelector(".hire--btn");
burger.addEventListener('click', function () {
body.classList.toggle('overflow')
navigation.classList.toggle('active');
burger.classList.toggle('hamburger_active');
})
contactBtn.addEventListener('click', function () {
navigation.classList.toggle('active');
burger.classList.toggle('hamburger_active');
//body.classList.toggle('overflow')
if (body.classList.contains('overflow'))
body.classList.remove('overflow')
})
/* мобильное меню />>>>>>>>>>>>>>>>>>>>>>*/
//filter >>>>>>>>>>>>>>>>>>>>>>*/
var filter = document.querySelector("#filter");
var filterBlock = document.querySelector(".portfolio-filter");
filter.addEventListener('click', function () {
filterBlock.classList.toggle('open');
})
//filter />>>>>>>>>>>>>>>>>>>>>>*/
//popup >>>>>>>>>>>>>>>>>>>>>>*/
//Функция для поиска атрибута по вложенным тегам
var closestsElementAttr = function (elem, attr) {
var node = elem;
/*/ если клик по дочернему элементу, то возвращаем
* атрибут родителя, перескакивая вверх через ноду по циклу
/*/
while (node) {
var attribute = node.getAttribute(attr);
if (attribute) {
return attribute; //атрибут есть — значит его и возвращаем, прекращая функцию
}
/*/ если атрибут пуст, то вместо текущего елемента берется его родительский
* и так по циклу до тех пор, пока у конечного родителя не найдется атрибут,
* иначе return null
/*/
node = node.parentElement;
}
//возврат null если нет нашего атрибута ни у элемента, ни у его дочерних узлов
return null;
}
//Поиск ближайшего элемента по классу
var closestsElementClass = function (elem, className) {
var node = elem;
/*/ если клик по дочернему элементу, то возвращаем
* класс родителя, перескакивая вверх через ноду по циклу
/*/
while (node) {
/*/ если текущий элемент содержит тот класс, который мы ему передали,
* при вызове функции, то просто возвращаем этот элемент,
/*/
if (node.classList.contains(className)) {
return node; //класс есть — значит его и возвращаем, прекращая функцию
}
/*/ если класса нет, то вместо текущего елемента берется его родительский
* и так по циклу до тех пор, пока у конечного родителя не найдется класс,
* который мы передали, иначе return null
/*/
node = node.parentElement;
}
//возврат null если нет нашего класса ни у элемента, ни у его дочерних узлов
return null;
}
//Показ попапа
function showPopup(target) {
target.classList.add('is-active');
}
//Скрытие попапа
function closePopup(target) {
target.classList.remove('is-active');
}
//BODY overflow hidden, чтобы при открытом попапе фон не скролился
function bodyOverflow() {
body.classList.toggle('overflow');
}
//Открытие попапа при клике на бургер меню
body.addEventListener('click', function (e) {
var target = e.target;
//Поиск названия data-popup, который задан у кнопки бургера
//var popupClass = target.getAttribute('data-popup');
var popupClass = closestsElementAttr(target, ' | data-popup');
//если элемент, на котором кликнули, не имеет аттрибут data-popup, то выходим
if (popupClass === null) {
r | conditional_block |
|
mamikon.js | on changeSlideLeft() {
//activeSlide.classList.remove('active');
for (var i = slides.length - 1; i >= 0; i--) {
if (slides[i].classList.contains('active')) {
slides[i].classList.remove('active');
if (i > 0)
slides[--i].classList.add('active');
else
slides[slides.length - 1].classList.add('active');
return;
}
}
}
rightArrow.addEventListener('click', function () {
changeSlideRight()
})
leftArrow.addEventListener('click', function () {
changeSlideLeft()
})
/* Slider />>>>>>>>>>>>>>>>>>>*/
/* carousel >>>>>>>>>>>>>>>>>>>*/
var root = document.documentElement;
var carouselElementsDisplayed = getComputedStyle(root).getPropertyValue("--carousel-elements-displayed");
var carouselContent = document.querySelector("ul.carousel-content");
root.style.setProperty("--carousel-elements", carouselContent.children.length);
for (var i = 0; i < carouselElementsDisplayed; i++) {
carouselContent.appendChild(carouselContent.children[i].cloneNode(true));
}
/* carousel />>>>>>>>>>>>>>>>>>>*/
//load-more >>>>>>>>>>>>>>>>>>>>>>*/
var loadMore = document.querySelector("#load-more");
var hiddenWorks = document.querySelectorAll(".portfolio-hidden");
var catalogBlock = document.querySelector('.portfolio-content');
loadMore.addEventListener('click', loadContent);
function loadContent() {
loadMore.style.display = 'none';
for (var i = 0; i < hiddenWorks.length; i++) {
catalogBlock.appendChild(hiddenWorks[i]);
}
}
//load-more />>>>>>>>>>>>>>>>>>>>>>*/
/* filter >>>>>>>>>>>>>>>>>>>>>>*/
var closestsElementClass = function (elem, className) {
var node = elem;
while (node) {
if (node.classList.contains(className)) {
return node; //класс есть — значит его и возвращаем, прекращая функцию
}
node = node.parentElement;
}
return null;
}
var catalog = document.querySelector('.portfolio-content');
//блок с табами
var catalogNav = document.querySelector('.portfolio-filter');
var catalogItems = document.querySelectorAll('.portfolio-content__item');
//Очистка блока с элементами, чтобы при фильрации добавлялись новые в чиситый блок
function removeChildren(item) {
while (item.firstChild) {
item.removeChild(item.firstChild)
}
}
//обновляем элементы в каталоге | item это блок каталога
function updateChildren(item, children) {
removeChildren(item);
for (var i = 0; i < children.length; i++) {
item.appendChild(children[i]);
}
}
catalogNav.addEventListener('click', function (e) {
var target = e.target;
var item = closestsElementClass(target, 'portfolio-filter__link');
if (item === null || item.classList.contains('is-active')) {
return;
}
loadContent();
e.preventDefault();
//Получаем значение из атрибута data-filter=""
var filterValue = item.getAttribute('data-filter');
var previousActiveBtn = document.querySelector('.portfolio-filter__link.is-active');
previousActiveBtn.classList.remove('is-active');
item.classList.add('is-active');
//Если выбраны ВСЕ, то просто их всех выводим
if (filterValue === 'all') {
updateChildren(catalog, catalogItems);
return;
}
//Отфильтрованные элементы перемещаем в массив
var filteredItems = [];
for (var i = 0; i < catalogItems.length; i++) {
var currentItem = catalogItems[i];
if (currentItem.getAttribute('data-category') === filterValue) {
filteredItems.push(currentItem);
}
}
updateChildren(catalog, filteredItems);
});
/* filter />>>>>>>>>>>>>>>>>>>>>>*/
/* переход по якорям />>>>>>>>>>>>>>>>>>>>>>*/
var smoothScroll = function (targetEl, duration) {
var headerElHeight = document.querySelector('#header').clientHeight; // класс хедера
var target = document.querySelector(targetEl);
var targetPosition = target.getBoundingClientRect().top; //- headerElHeight; //вычитаем размер хедера, если он фиксированный
var startPosition = window.pageYOffset;
var startTime = null;
var ease = function (t, b, c, d) {
t /= d / 2;
if (t < 1) return c / 2 * t * t + b;
t--;
return -c / 2 * (t * (t - 2) - 1) + b;
};
var animation = function (currentTime) {
if (startTime === null) startTime = currentTime;
var timeElapsed = currentTime - startTime;
var run = ease(timeElapsed, startPosition, targetPosition, duration);
window.scrollTo(0, run);
if (timeElapsed < duration) requestAnimationFrame(animation);
};
requestAnimationFrame(animation);
};
var scrollTo = function () {
var headerNav = document.querySelector('.navigation');
//var links = document.querySelectorAll('.js-scroll'); //добавляем классы к линкам
var links = document.querySelectorAll('.nav-link'); //добавляем классы к линкам
links.forEach(each => {
each.addEventListener('click', function () {
var currentTarget = this.getAttribute('href');
smoothScroll(currentTarget, 1000);
//выход из мобильного меню
headerNav.classList.remove('active');
document.querySelector("#burger").classList.remove('hamburger_active');
//body.classList.toggle('overflow')
if (body.classList.contains('overflow'))
body.classList.remove('overflow')
});
});
};
scrollTo();
/* переход по якорям />>>>>>>>>>>>>>>>>>>>>>*/
/* мобильное меню >>>>>>>>>>>>>>>>>>>>>>*/
var burger = document.querySelector("#burger");
var navigation = document.querySelector(".navigation");
var contactBtn = document.querySelector(".hire--btn");
burger.addEventListener('click', function () {
body.classList.toggle('overflow')
navigation.classList.toggle('active');
burger.classList.toggle('hamburger_active');
})
contactBtn.addEventListener('click', function () {
navigation.classList.toggle('active');
burger.classList.toggle('hamburger_active');
//body.classList.toggle('overflow')
if (body.classList.contains('overflow'))
body.classList.remove('overflow')
})
/* мобильное меню />>>>>>>>>>>>>>>>>>>>>>*/
//filter >>>>>>>>>>>>>>>>>>>>>>*/
var filter = document.querySelector("#filter");
var filterBlock = document.querySelector(".portfolio-filter");
filter.addEventListener('click', function () {
filterBlock.classList.toggle('open');
})
//filter />>>>>>>>>>>>>>>>>>>>>>*/
//popup >>>>>>>>>>>>>>>>>>>>>>*/
//Функция для поиска атрибута по вложенным тегам
var closestsElementAttr = function (elem, attr) {
var node = elem;
/*/ если клик по дочернему элементу, то возвращаем
* атрибут родителя, перескакивая вверх через ноду по циклу
/*/
while (node) {
var attribute = node.getAttribute(attr);
if (attribute) {
return attribute; //атрибут есть — значит его и возвращаем, прекращая функцию
}
/*/ если атрибут пуст, то вместо текущего елемента берется его родительский
* и так по циклу до тех пор, пока у конечного родителя не найдется атрибут,
* иначе return null
/*/
node = node.parentElement;
}
//возврат null если нет нашего атрибута ни у элемента, ни у его дочерних узлов
return null;
}
//Поиск ближайшего элемента по классу
var closestsElementClass = function (elem, className) {
var node = elem;
/*/ если клик по дочернему элементу, то возвращаем
* класс родителя, перескакивая вверх через ноду по циклу
/*/
while (node) {
/*/ если текущий элемент содержит тот класс, который мы ему передали,
* при вызове функции, то просто возвращаем этот элемент,
/*/
if (node.classList.contains(className)) {
return node; //класс есть — значит его и возвращаем, прекращая функцию
}
/*/ если класса нет, то вместо текущего елемента берется его родительский
* и так по циклу до тех пор, пока у конечного родителя не найдется класс,
* | iveSlide.classList.remove('active');
for (var i = 0; i < slides.length; i++) {
if (slides[i].classList.contains('active')) {
slides[i].classList.remove('active');
if (i < slides.length - 1)
slides[++i].classList.add('active');
else
slides[0].classList.add('active');
return;
}
}
}
functi | identifier_body |
|
GameScene.ts | .removeChild(this.m_BeanView);
gameLayer.removeChild(this.m_FlyBeanContainer);
gameLayer.removeChild(this.m_SlitherContainer);
this.allNames = {};
this.m_TileBackground = null;
this.m_BeanView = null;
this.m_FlyBeanContainer = null;
this.m_SlitherContainer = null;
}
public StopTweenCamera()
{
}
public LateUpdate(deltaTime:number)
{
/*let UIWidth = SceneManager.GetInstance().GetUIViewWidth();
let UIHeight = SceneManager.GetInstance().GetUIViewHeight();
Camera3D *gameCamera = SceneManager.GetInstance().GetGameCamera();
if (PlayerController.Ins && !IsGameOver())
{
Vector2D headPos = PlayerController.Ins.GetSlitherBody().GetHeadPos();
gameCamera.SetPosition(Vector3D(headPos.x, headPos.y, 0));
let viewHalfHeight:number = this.m_WidthToView.Evaluate(PlayerController.Ins.GetSlitherBody().GetWidth());
viewHalfHeight *= this.m_ExtraViewSize;
if (UIWidth > UIHeight) {
SetOrthoSize(this.m_CamaraViewFactor * viewHalfHeight);
} else {
SetOrthoSize(this.m_CamaraViewFactor * viewHalfHeight / (UIWidth / UIHeight));
}
}
// all slither update
for (auto slither : this.m_AllSlithers)
{
slither.LateUpdate();
}*/
}
public SetOrthoSize(size)
{
}
public ContainesSlitherID(data:Array<SlitherHeadInfo>, id:number):boolean
{
for (let i = 0; i < data.length; ++i)
{
if (data[i].id == id)
{
return true;
}
}
return false;
}
public OnReSpawn(msg:RpcReSpawn)
{
this.m_PlayerID = msg.playerID;
this.m_PlayerNameInfo = msg.playerNameInfo;
this.AddNameInfo(this.m_PlayerNameInfo);
this.m_FirstSynced = false;
this.DestroyAllSlithers();
//SceneManager.GetInstance().GetGameCamera().SetPosition(Vector3D(msg.initialPos.x, msg.initialPos.y, 0));
//DispatchEvent(InGameEvent.kInGamePlayerRespawn);
}
public IsNpc(id:number):boolean
{
let it = this.allNames[id];
if (it) {
return it.npc;
}
return false;
}
public FindSlither(name:string):Slither;
public FindSlither(name:number):Slither;
public FindSlither(name:any):Slither
{
let attr = (typeof name == "number")?"GetID":"GetName";
let size = this.m_AllSlithers.length;
for (let i = 0; i < size; ++i) {
let slither = this.m_AllSlithers[i];
if (slither[attr]() == name) {
return slither;
}
}
return null;
}
public AddNameInfo(name:NameInfo)
{
this.allNames[name.id] = name;
}
public SendSlitherCmd(targetDirection:Vector2D, accelarating:boolean)
{
}
public SetSlitherNameInfo(slither:Slither, id:number)
{
let info = this.allNames[id];
if (!info) {
return;
}
slither.SetName(info.name);
slither.GetBody().SetSkinID(info.skinID);
slither.GetBody().SetDecorationID(info.decorationID);
slither.GetBody().SetNameInfo(info);
slither.SetArea(info.publicInfo.area);
}
public OnSlitherBirthDeathSync(msg:RpcSlitherBirthDeathSync)
{
let foundPlayer = false;
let count = msg.slithers.length;
for (let i = 0; i < count; i++)
{
let info = msg.slithers[i];
let slither = this.FindSlither(info.id);
let isPlayer = info.id == this.m_PlayerID;
if (isPlayer) {
foundPlayer = true;
}
if (info.type == SlitherBirthDeath.EnterView)
{
//slither entered the view
if (slither == null)
{
if (isPlayer)
{
let slitherBody = SlitherNodeBody.CreateSlither(SlitherType.kSlitherPlayerMP);
slitherBody.SetAsMainPlayer();
slither = new Slither(slitherBody);
slither.SetController(new PlayerController(slitherBody, slither));
}
else
{
let slitherBody = SlitherNodeBody.CreateSlither(SlitherType.kSlitherOtherMP);
slither = new Slither(slitherBody);
}
this.SetSlitherNameInfo(slither, info.id);
this.m_AllSlithers.push(slither);
this.m_SlitherContainer.addChild(slither.GetBody());
}
slither.Sync(info.id);
}
else if (info.type == SlitherBirthDeath.ExitView)
{
//slither left the view
if (slither != null)
{
for (let m = 0; m < this.m_AllSlithers.length; ++m) {
if (this.m_AllSlithers[m] == slither) {
this.m_AllSlithers.splice(m, 1);
break;
}
}
this.m_BeanView.OnDeleteSlither(slither);
if (slither.GetBody().GetVisible()) {
slither.GetBody().Die();
egret.setTimeout((slither)=>{
slither.dispose();
}, this, slither.GetBody().DieDestroyDelay * 1000 + 100, slither);
}
else
{
slither.dispose();
}
}
else
{
console.log("Slither exit game but not found slither entity.\n");
}
}
}
// if the first sync carries no player info, the player is considered dead
if (this.m_PlayerID != 0 && !this.m_FirstSynced) {
this.m_FirstSynced = true;
}
}
public OnSlitherHeadSync(msg:RpcSlitherHeadSync)
{
for (let i = 0; i < msg.slithers.length; ++i)
{
let info = msg.slithers[i];
let slither = this.FindSlither(info.id);
if (slither != null)
{
if (this.m_PlayerID == info.id) {//record the score
this.m_Playerlength = info.length;
}
slither.SyncInfo(info);
}
}
}
public OnPlayerInfoSync(msg:RpcPlayerInfoSync)
{
this.m_PlayerInfo = msg.playerInfo;
if (PlayerController.Ins) {
PlayerController.Ins.GetSlither().SyncPlayerInfo(this.m_PlayerInfo);
}
}
public OnSlitherBodySync(msg:RpcSlitherBodySync)
{
for (let i = 0; i < msg.slithers.length; ++i)
{
let info = msg.slithers[i];
let slither = this.FindSlither(info.id);
if (slither != null)
{
slither.SyncPoints(info.points);
}
}
}
public DestroyAllSlithers()
{
let list = this.m_AllSlithers;
let size = list.length;
for (let i = 0; i < size; ++i) {
list[i].dispose();
delete list[i];
}
this.m_AllSlithers.length = 0;
//this.m_BodyDataMgr.DestroyAllSlithers();
}
public DoGameOver()
{
this.m_GameOver = true;
//SoundManager.GetInstance().StopBackgroundMusic();
for (let i = 0; i < this.m_AllSlithers.length; i++) {
let slither = this.m_AllSlithers[i];
if (slither) {
let speed = slither.GetBody().GetSpeed();
speed.Normalize();
speed.x *= 0.001;
speed.y *= 0.001;
slither.m_Body.SetSpeed(speed);
}
}
// hide the in-game main UI
//SceneManager.GetInstance().HideApp(AppModuleType.kAppInGameMainUI);
//SceneManager.GetInstance().ShowApp(AppModuleType.kAppInGameTimeOver,true);
}
public Update(deltaTime:number)
{
if (this.m_GameOver) {
return;
}
this.m_Time += deltaTime;
for (let i = 0; i < this.m_AllSlithers.length; ++i) {
this.m_AllSlithers[i].Update(deltaTime);
}
this.m_BeanView.Up | date(deltaTime);
this.LateUpdate(deltaTime);
| conditional_block |
|
GameScene.ts | Size:number;
private m_LastOrthoSizeTarget:number;
private m_CurrOrthoSizeTarget:number;
private m_CameraViewAnimDuration:number = 1.0;
private m_CameraViewFactor:number = 1.0;
protected m_Time:number;
protected m_Radius:number;
protected allNames:Object = new Object();
protected m_PlayerNameInfo:NameInfo = new NameInfo();
protected m_PlayerID:number;
protected m_EnemyID:number;
private m_BeanView:GameSceneBeanView;
private m_CamaraViewFactor:number;
public constructor()
{
super(SceneType.kSceneGame)
GameScene.Inst = this;
this.m_CamaraViewFactor=1.0;
this.m_CameraViewAnimDuration=1.0;
this.m_ExtraViewSize = 1.0;
this.m_GameOver = false;
this.m_SortingOrderCounter = 0;
this.m_FirstSynced = false;
this.m_PlayerID = 0;
this.m_LastOrthoSizeTarget = 3.2;
this.m_CurrOrthoSizeTarget = 3.2;
this.m_Time = 0.0;
this.m_BeanView = null;
this.m_TileBackground = null;
this.m_WidthToView.AddKey(0.0, 3.675);
this.m_WidthToView.AddKey(0.4, 3.675);
this.m_WidthToView.AddKey(0.5, 4.1);
this.m_WidthToView.AddKey(0.65, 4.633);
this.m_WidthToView.AddKey(0.75, 4.841);
this.m_WidthToView.AddKey(0.8, 4.91);
this.m_WidthToView.AddKey(1.0, 5.25);
this.m_WidthToView.AddKey(1.5, 6.0);
this.m_WidthToView.AddKey(1.971, 6.471);
this.m_WidthToView.AddKey(2.654, 2.801);
this.m_WidthToView.AddKey(3.2, 6.868);
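// Reading of the keys above: m_WidthToView appears to map the slither's body width to a
// camera half-view size, so the view zooms out as the player grows (the commented-out
// LateUpdate below feeds GetWidth() into Evaluate()). The (2.654, 2.801) entry breaks the
// otherwise increasing curve and may be a data-entry slip in the original source.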
let gameLayer = this;
this.m_SlitherContainer = new egret.DisplayObjectContainer();
this.m_FlyBeanContainer = new egret.DisplayObjectContainer();
this.m_TileBackground = new TiledBackground(this);
this.m_BeanView = new GameSceneBeanView(this);
gameLayer.addChild(this.m_TileBackground);
gameLayer.addChild(this.m_BeanView);
gameLayer.addChild(this.m_FlyBeanContainer);
gameLayer.addChild(this.m_SlitherContainer);
this.m_Radius = MainEnterGame.GetInstance().SceneInitialize.sceneRadius;
let net = GameNetManager.GetInstance();
net.AddEventListener(CmdIDs.kRpcSlitherBirthDeathSync, this.OnSlitherBirthDeathSync, this);
net.AddEventListener(CmdIDs.kRpcPlayerInfoSync, this.OnPlayerInfoSync, this);
net.AddEventListener(CmdIDs.kRpcReSpawn, this.OnReSpawn, this);
net.AddEventListener(CmdIDs.kRpcSlitherHeadSync, this.OnSlitherHeadSync, this);
net.AddEventListener(CmdIDs.kRpcSlitherBodySync, this.OnSlitherBodySync, this);
//net.AddEventListener(CmdIDs.kRpcKiller, this.OnRpcKiller, this);
//net.AddEventListener(CmdIDs.kRpcKilled, this.OnRpcKilled, this);
}
public InvokeGameOver() { this.m_GameOver = true; }
public IsGameOver():boolean { return this.m_GameOver; }
public GetPlayerInfo():SlitherPlayerInfo { return this.m_PlayerInfo; }
public GetPlayerScore():number {
let score = this.m_PlayerInfo.score;
if (score < 0){
score = 0;
}
return score;
}
public GetPlayerLength():number { return this.m_Playerlength; }
public GetSlithers():Array<Slither> { return this.m_AllSlithers; }
public GetEnemyID():number { return this.m_EnemyID; }
public GetPlayerID():number { return this.m_PlayerID; }
public GetRadius():number { return this.m_Radius; }
public GetPlayerNameInfo():NameInfo { return this.m_PlayerNameInfo; }
public dispose()
{
GameScene.Inst = null;
this.StopTweenCamera();
let net = GameNetManager.GetInstance();
net.RemoveEventListener(CmdIDs.kRpcSlitherBirthDeathSync, this.OnSlitherBirthDeathSync, this);
net.RemoveEventListener(CmdIDs.kRpcPlayerInfoSync, this.OnPlayerInfoSync, this);
net.RemoveEventListener(CmdIDs.kRpcReSpawn, this.OnReSpawn, this);
net.RemoveEventListener(CmdIDs.kRpcSlitherHeadSync, this.OnSlitherHeadSync, this);
net.RemoveEventListener(CmdIDs.kRpcSlitherBodySync, this.OnSlitherBodySync, this);
this.DestroyAllSlithers();
let gameLayer = this;
gameLayer.removeChild(this.m_TileBackground);
gameLayer.removeChild(this.m_BeanView);
gameLayer.removeChild(this.m_FlyBeanContainer);
gameLayer.removeChild(this.m_SlitherContainer);
this.allNames = {};
this.m_TileBackground = null;
this.m_BeanView = null;
this.m_FlyBeanContainer = null;
this.m_SlitherContainer = null;
}
public StopTweenCamera()
{
}
public LateUpdate(deltaTime:number)
{
/*let UIWidth = SceneManager.GetInstance().GetUIViewWidth();
let UIHeight = SceneManager.GetInstance().GetUIViewHeight();
Camera3D *gameCamera = SceneManager.GetInstance().GetGameCamera();
if (PlayerController.Ins && !IsGameOver())
{
Vector2D headPos = PlayerController.Ins.GetSlitherBody().GetHeadPos();
gameCamera.SetPosition(Vector3D(headPos.x, headPos.y, 0));
let viewHalfHeight:number = this.m_WidthToView.Evaluate(PlayerController.Ins.GetSlitherBody().GetWidth());
viewHalfHeight *= this.m_ExtraViewSize;
if (UIWidth > UIHeight) {
SetOrthoSize(this.m_CamaraViewFactor * viewHalfHeight);
} else {
SetOrthoSize(this.m_CamaraViewFactor * viewHalfHeight / (UIWidth / UIHeight));
}
}
// all slither update
for (auto slither : this.m_AllSlithers)
{
slither.LateUpdate();
}*/
}
public SetOrthoSize(size)
{
}
public ContainesSlitherID(data:Array<SlitherHeadInfo>, id:number):boolean
{
for (let i = 0; i < data.length; ++i)
{
if (data[i].id == id)
{
return true;
}
}
return false;
}
public OnReSpawn(msg:RpcReSpawn)
{
this.m_PlayerID = msg.playerID;
this.m_PlayerNameInfo = msg.playerNameInfo;
this.AddNameInfo(this.m_PlayerNameInfo);
this.m_FirstSynced = false;
this.DestroyAllSlithers();
//SceneManager.GetInstance().GetGameCamera().SetPosition(Vector3D(msg.initialPos.x, msg.initialPos.y, 0));
//DispatchEvent(InGameEvent.kInGamePlayerRespawn);
}
public IsNpc(id:number):boolean
{
let it = this.allNames[id];
if (it) {
return it.npc;
}
return false;
}
public FindSlither(name:string):Slither;
public FindSlither(name:number):Slither;
public FindSlither(name:any):Slither
{
| public AddNameInfo(name:NameInfo)
{
this.allNames[name.id] = name;
}
public SendSlitherCmd(targetDirection:Vector2D, accelarating:boolean)
{
}
public SetSlitherNameInfo(slither:Slither, id:number)
{
let info = this.allNames[id];
if (!info) {
return;
}
slither.SetName(info.name);
slither.GetBody().SetSkinID(info.skinID);
slither.GetBody().SetDecorationID(info.decorationID);
slither.GetBody().SetNameInfo(info);
slither.SetArea(info.publicInfo.area);
}
public OnSlitherBirthDeathSync(msg:RpcSlitherBirthDeathSync)
{
let foundPlayer = false;
let count = msg.slithers.length;
for (let i = | let attr = (typeof name == "number")?"GetID":"GetName";
let size = this.m_AllSlithers.length;
for (let i = 0; i < size; ++i) {
let slither = this.m_AllSlithers[i];
if (slither[attr]() == name) {
return slither;
}
}
return null;
}
| identifier_body |
GameScene.ts | Size:number;
private m_LastOrthoSizeTarget:number;
private m_CurrOrthoSizeTarget:number;
private m_CameraViewAnimDuration:number = 1.0;
private m_CameraViewFactor:number = 1.0;
protected m_Time:number;
protected m_Radius:number;
protected allNames:Object = new Object();
protected m_PlayerNameInfo:NameInfo = new NameInfo();
protected m_PlayerID:number;
protected m_EnemyID:number;
private m_BeanView:GameSceneBeanView;
private m_CamaraViewFactor:number;
public constructor()
{
super(SceneType.kSceneGame)
GameScene.Inst = this;
this.m_CamaraViewFactor=1.0;
this.m_CameraViewAnimDuration=1.0;
this.m_ExtraViewSize = 1.0;
this.m_GameOver = false;
this.m_SortingOrderCounter = 0;
this.m_FirstSynced = false;
this.m_PlayerID = 0;
this.m_LastOrthoSizeTarget = 3.2;
this.m_CurrOrthoSizeTarget = 3.2;
this.m_Time = 0.0;
this.m_BeanView = null;
this.m_TileBackground = null;
this.m_WidthToView.AddKey(0.0, 3.675);
this.m_WidthToView.AddKey(0.4, 3.675);
this.m_WidthToView.AddKey(0.5, 4.1);
this.m_WidthToView.AddKey(0.65, 4.633);
this.m_WidthToView.AddKey(0.75, 4.841);
this.m_WidthToView.AddKey(0.8, 4.91);
this.m_WidthToView.AddKey(1.0, 5.25);
this.m_WidthToView.AddKey(1.5, 6.0);
this.m_WidthToView.AddKey(1.971, 6.471);
this.m_WidthToView.AddKey(2.654, 2.801);
this.m_WidthToView.AddKey(3.2, 6.868);
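//m_WidthToView is a keyframed curve mapping the player's body width to a camera view
//half-height; the commented-out LateUpdate code below evaluates it via
//this.m_WidthToView.Evaluate(width) and scales the result by m_ExtraViewSize.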
let gameLayer = this;
this.m_SlitherContainer = new egret.DisplayObjectContainer();
this.m_FlyBeanContainer = new egret.DisplayObjectContainer();
this.m_TileBackground = new TiledBackground(this);
this.m_BeanView = new GameSceneBeanView(this);
gameLayer.addChild(this.m_TileBackground);
gameLayer.addChild(this.m_BeanView);
gameLayer.addChild(this.m_FlyBeanContainer);
gameLayer.addChild(this.m_SlitherContainer);
this.m_Radius = MainEnterGame.GetInstance().SceneInitialize.sceneRadius;
let net = GameNetManager.GetInstance();
net.AddEventListener(CmdIDs.kRpcSlitherBirthDeathSync, this.OnSlitherBirthDeathSync, this);
net.AddEventListener(CmdIDs.kRpcPlayerInfoSync, this.OnPlayerInfoSync, this);
net.AddEventListener(CmdIDs.kRpcReSpawn, this.OnReSpawn, this);
net.AddEventListener(CmdIDs.kRpcSlitherHeadSync, this.OnSlitherHeadSync, this);
net.AddEventListener(CmdIDs.kRpcSlitherBodySync, this.OnSlitherBodySync, this);
//net.AddEventListener(CmdIDs.kRpcKiller, this.OnRpcKiller, this);
//net.AddEventListener(CmdIDs.kRpcKilled, this.OnRpcKilled, this);
}
public InvokeGameOver() { this.m_GameOver = true; }
public IsGameOver():boolean { return this.m_GameOver; }
public GetPlayerInfo():SlitherPlayerInfo { return this.m_PlayerInfo; }
public GetPlayerScore():number {
let score = this.m_PlayerInfo.score;
if (score < 0){
score = 0;
}
return score;
}
public GetPlayerLength():number { return this.m_Playerlength; }
public GetSlithers():Array<Slither> { return this.m_AllSlithers; }
public GetEnemyID():number { return this.m_EnemyID; }
public GetPlayerID():number { return this.m_PlayerID; }
public GetRadius():number { return this.m_Radius; }
public GetPlayerNameInfo():NameInfo { return this.m_PlayerNameInfo; }
public dispose()
{
GameScene.Inst = null;
this.StopTweenCamera();
let net = GameNetManager.GetInstance();
net.RemoveEventListener(CmdIDs.kRpcSlitherBirthDeathSync, this.OnSlitherBirthDeathSync, this);
net.RemoveEventListener(CmdIDs.kRpcPlayerInfoSync, this.OnPlayerInfoSync, this);
net.RemoveEventListener(CmdIDs.kRpcReSpawn, this.OnReSpawn, this);
net.RemoveEventListener(CmdIDs.kRpcSlitherHeadSync, this.OnSlitherHeadSync, this);
net.RemoveEventListener(CmdIDs.kRpcSlitherBodySync, this.OnSlitherBodySync, this);
this.DestroyAllSlithers();
let gameLayer = this;
gameLayer.removeChild(this.m_TileBackground);
gameLayer.removeChild(this.m_BeanView);
gameLayer.removeChild(this.m_FlyBeanContainer);
gameLayer.removeChild(this.m_SlitherContainer);
this.allNames = {};
this.m_TileBackground = null;
this.m_BeanView = null;
this.m_FlyBeanContainer = null;
this.m_SlitherContainer = null;
}
public StopTweenCamera()
{
}
public LateUpdate(deltaTime:number)
{
/*let UIWidth = SceneManager.GetInstance().GetUIViewWidth();
let UIHeight = SceneManager.GetInstance().GetUIViewHeight();
Camera3D *gameCamera = SceneManager.GetInstance().GetGameCamera();
if (PlayerController.Ins && !IsGameOver())
{
Vector2D headPos = PlayerController.Ins.GetSlitherBody().GetHeadPos();
gameCamera.SetPosition(Vector3D(headPos.x, headPos.y, 0));
let viewHalfHeight:number = this.m_WidthToView.Evaluate(PlayerController.Ins.GetSlitherBody().GetWidth());
viewHalfHeight *= this.m_ExtraViewSize;
if (UIWidth > UIHeight) {
SetOrthoSize(this.m_CamaraViewFactor * viewHalfHeight);
} else {
SetOrthoSize(this.m_CamaraViewFactor * viewHalfHeight / (UIWidth / UIHeight));
}
}
// all slither update
for (auto slither : this.m_AllSlithers)
{
slither.LateUpdate();
}*/
}
public SetOrthoSize(size)
{
}
public ContainesSlitherID(data:Array<SlitherHeadInfo>, id:number):boolean
{
for (let i = 0; i < data.length; ++i)
{
if (data[i].id == id)
{
return true;
}
}
return false;
}
public OnReSpawn(msg:RpcReSpawn)
{
this.m_PlayerID = msg.playerID;
this.m_PlayerNameInfo = msg.playerNameInfo;
this.AddNameInfo(this.m_PlayerNameInfo);
this.m_FirstSynced = false;
this.DestroyAllSlithers();
//SceneManager.GetInstance().GetGameCamera().SetPosition(Vector3D(msg.initialPos.x, msg.initialPos.y, 0));
//DispatchEvent(InGameEvent.kInGamePlayerRespawn);
}
public IsNpc(id:number):boolean
{
let it = this.allNames[id];
if (it) {
return it.npc;
}
return false;
}
public FindSlither(name:string):Slither;
public FindSlither(name:number):Slither;
public FindSlither(name:any):Slither
{
let attr = (typeof name == "number")?"GetID":"GetName";
let size = this.m_AllSlithers.length;
for (let i = 0; i < size; ++i) {
let slither = this.m_AllSlithers[i];
if (slither[attr]() == name) {
return slither;
}
}
return null;
}
public AddNameInfo(name:NameInfo)
{
this.allNames[name.id] = name;
}
public SendSlitherCmd(targetDirection:Vector2D, accelerating:boolean)
{
}
public SetSlitherNameInfo(slither:Slither, id:number)
{
let info = this.allNames[id];
if (!info) {
return;
}
slither.SetName(info.name);
slither.GetBody().SetSkinID(info.skinID);
slither.GetBody().SetDecorationID(info.decorationID);
slither.GetBody().SetNameInfo(info);
slither.SetArea(info.publicInfo.area);
}
public On | sg:RpcSlitherBirthDeathSync)
{
let foundPlayer = false;
let count = msg.slithers.length;
for (let i | SlitherBirthDeathSync(m | identifier_name |
GameScene.ts | Size:number;
private m_LastOrthoSizeTarget:number;
private m_CurrOrthoSizeTarget:number;
private m_CameraViewAnimDuration:number = 1.0;
private m_CameraViewFactor:number = 1.0;
protected m_Time:number;
protected m_Radius:number;
protected allNames:Object = new Object();
protected m_PlayerNameInfo:NameInfo = new NameInfo();
protected m_PlayerID:number;
protected m_EnemyID:number;
private m_BeanView:GameSceneBeanView;
private m_CamaraViewFactor:number;
public constructor()
{
super(SceneType.kSceneGame)
GameScene.Inst = this;
this.m_CamaraViewFactor=1.0;
this.m_CameraViewAnimDuration=1.0;
this.m_ExtraViewSize = 1.0;
this.m_GameOver = false;
this.m_SortingOrderCounter = 0;
this.m_FirstSynced = false;
this.m_PlayerID = 0;
this.m_LastOrthoSizeTarget = 3.2;
this.m_CurrOrthoSizeTarget = 3.2;
this.m_Time = 0.0;
this.m_BeanView = null;
this.m_TileBackground = null;
this.m_WidthToView.AddKey(0.0, 3.675);
this.m_WidthToView.AddKey(0.4, 3.675);
this.m_WidthToView.AddKey(0.5, 4.1);
this.m_WidthToView.AddKey(0.65, 4.633);
this.m_WidthToView.AddKey(0.75, 4.841);
this.m_WidthToView.AddKey(0.8, 4.91);
this.m_WidthToView.AddKey(1.0, 5.25);
this.m_WidthToView.AddKey(1.5, 6.0);
this.m_WidthToView.AddKey(1.971, 6.471);
this.m_WidthToView.AddKey(2.654, 2.801);
this.m_WidthToView.AddKey(3.2, 6.868);
let gameLayer = this;
this.m_SlitherContainer = new egret.DisplayObjectContainer();
this.m_FlyBeanContainer = new egret.DisplayObjectContainer();
this.m_TileBackground = new TiledBackground(this);
this.m_BeanView = new GameSceneBeanView(this);
gameLayer.addChild(this.m_TileBackground);
gameLayer.addChild(this.m_BeanView);
gameLayer.addChild(this.m_FlyBeanContainer);
gameLayer.addChild(this.m_SlitherContainer);
this.m_Radius = MainEnterGame.GetInstance().SceneInitialize.sceneRadius;
let net = GameNetManager.GetInstance();
net.AddEventListener(CmdIDs.kRpcSlitherBirthDeathSync, this.OnSlitherBirthDeathSync, this);
net.AddEventListener(CmdIDs.kRpcPlayerInfoSync, this.OnPlayerInfoSync, this);
net.AddEventListener(CmdIDs.kRpcReSpawn, this.OnReSpawn, this);
net.AddEventListener(CmdIDs.kRpcSlitherHeadSync, this.OnSlitherHeadSync, this);
net.AddEventListener(CmdIDs.kRpcSlitherBodySync, this.OnSlitherBodySync, this);
//net.AddEventListener(CmdIDs.kRpcKiller, this.OnRpcKiller, this);
//net.AddEventListener(CmdIDs.kRpcKilled, this.OnRpcKilled, this);
}
public InvokeGameOver() { this.m_GameOver = true; }
public IsGameOver():boolean { return this.m_GameOver; }
public GetPlayerInfo():SlitherPlayerInfo { return this.m_PlayerInfo; }
public GetPlayerScore():number {
let score = this.m_PlayerInfo.score;
if (score < 0){
score = 0;
}
return score;
}
public GetPlayerLength():number { return this.m_Playerlength; }
public GetSlithers():Array<Slither> { return this.m_AllSlithers; }
public GetEnemyID():number { return this.m_EnemyID; }
public GetPlayerID():number { return this.m_PlayerID; }
public GetRadius():number { return this.m_Radius; }
public GetPlayerNameInfo():NameInfo { return this.m_PlayerNameInfo; }
public dispose()
{
GameScene.Inst = null;
this.StopTweenCamera();
let net = GameNetManager.GetInstance();
net.RemoveEventListener(CmdIDs.kRpcSlitherBirthDeathSync, this.OnSlitherBirthDeathSync, this);
net.RemoveEventListener(CmdIDs.kRpcPlayerInfoSync, this.OnPlayerInfoSync, this);
net.RemoveEventListener(CmdIDs.kRpcReSpawn, this.OnReSpawn, this);
net.RemoveEventListener(CmdIDs.kRpcSlitherHeadSync, this.OnSlitherHeadSync, this);
net.RemoveEventListener(CmdIDs.kRpcSlitherBodySync, this.OnSlitherBodySync, this); |
this.DestroyAllSlithers();
let gameLayer = this;
gameLayer.removeChild(this.m_TileBackground);
gameLayer.removeChild(this.m_BeanView);
gameLayer.removeChild(this.m_FlyBeanContainer);
gameLayer.removeChild(this.m_SlitherContainer);
this.allNames = {};
this.m_TileBackground = null;
this.m_BeanView = null;
this.m_FlyBeanContainer = null;
this.m_SlitherContainer = null;
}
public StopTweenCamera()
{
}
public LateUpdate(deltaTime:number)
{
/*let UIWidth = SceneManager.GetInstance().GetUIViewWidth();
let UIHeight = SceneManager.GetInstance().GetUIViewHeight();
Camera3D *gameCamera = SceneManager.GetInstance().GetGameCamera();
if (PlayerController.Ins && !IsGameOver())
{
Vector2D headPos = PlayerController.Ins.GetSlitherBody().GetHeadPos();
gameCamera.SetPosition(Vector3D(headPos.x, headPos.y, 0));
let viewHalfHeight:number = this.m_WidthToView.Evaluate(PlayerController.Ins.GetSlitherBody().GetWidth());
viewHalfHeight *= this.m_ExtraViewSize;
if (UIWidth > UIHeight) {
SetOrthoSize(this.m_CamaraViewFactor * viewHalfHeight);
} else {
SetOrthoSize(this.m_CamaraViewFactor * viewHalfHeight / (UIWidth / UIHeight));
}
}
// all slither update
for (auto slither : this.m_AllSlithers)
{
slither.LateUpdate();
}*/
}
public SetOrthoSize(size)
{
}
public ContainesSlitherID(data:Array<SlitherHeadInfo>, id:number):boolean
{
for (let i = 0; i < data.length; ++i)
{
if (data[i].id == id)
{
return true;
}
}
return false;
}
public OnReSpawn(msg:RpcReSpawn)
{
this.m_PlayerID = msg.playerID;
this.m_PlayerNameInfo = msg.playerNameInfo;
this.AddNameInfo(this.m_PlayerNameInfo);
this.m_FirstSynced = false;
this.DestroyAllSlithers();
//SceneManager.GetInstance().GetGameCamera().SetPosition(Vector3D(msg.initialPos.x, msg.initialPos.y, 0));
//DispatchEvent(InGameEvent.kInGamePlayerRespawn);
}
public IsNpc(id:number):boolean
{
let it = this.allNames[id];
if (it) {
return it.npc;
}
return false;
}
public FindSlither(name:string):Slither;
public FindSlither(name:number):Slither;
public FindSlither(name:any):Slither
{
let attr = (typeof name == "number")?"GetID":"GetName";
let size = this.m_AllSlithers.length;
for (let i = 0; i < size; ++i) {
let slither = this.m_AllSlithers[i];
if (slither[attr]() == name) {
return slither;
}
}
return null;
}
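//Hypothetical usage (not called in this file): FindSlither(42) matches against GetID(),
//while FindSlither("SomePlayerName") matches against GetName(); both return null when nothing matches.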
public AddNameInfo(name:NameInfo)
{
this.allNames[name.id] = name;
}
public SendSlitherCmd(targetDirection:Vector2D, accelerating:boolean)
{
}
public SetSlitherNameInfo(slither:Slither, id:number)
{
let info = this.allNames[id];
if (!info) {
return;
}
slither.SetName(info.name);
slither.GetBody().SetSkinID(info.skinID);
slither.GetBody().SetDecorationID(info.decorationID);
slither.GetBody().SetNameInfo(info);
slither.SetArea(info.publicInfo.area);
}
public OnSlitherBirthDeathSync(msg:RpcSlitherBirthDeathSync)
{
let foundPlayer = false;
let count = msg.slithers.length;
for (let i | random_line_split |
|
mod.rs | , a warning to indicate as such
//! pub const WARNING: Option<Warning> = ...;
//!
//! // Given a file that we know starts with the correct version prefix, parse it
//! //
//! // If any errors are encountered, print them and exit.
//! pub fn parse(file_content: String) -> FileContent { ... }
//! ```
//! Those are used by the `parse` function at the bottom of this file.
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::fmt::{self, Display, Formatter};
use std::fs::read_to_string;
use std::ops::Range;
use std::path::Path;
use std::process::exit;
use std::time::SystemTime;
mod errors;
mod latest;
mod v0_2;
mod v0_3;
mod v0_4;
pub use errors::*;
/// Helper struct for file contents with an attached key
///
/// This is extracted into this module so that it's able to be used by multiple submodules without
/// redefinition.
pub struct Keyed<C> {
key: Option<Vec<u8>>,
unsaved: bool,
content: C,
}
impl<C> Keyed<C> {
/// Creates a new `Keyed` content without the key
fn new(content: C) -> Self {
Keyed {
key: None,
unsaved: false,
content,
}
}
}
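// Illustrative sketch only: a freshly parsed file starts out locked and clean, e.g.
//     let keyed = Keyed::new(parsed_content);
//     // keyed.key == None, keyed.unsaved == false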
/// A particular file format, kept object-safe so that we can switch on it at runtime
///
/// The first couple methods here are for providing the "meta" methods -- the rest merely provide
/// the shared facilities for interaction with contents of the file.
pub trait FileContent {
/// Helper function to convert to the latest version, given the user's password
///
/// It's customary for this method to only convert to the next version internally, and instead
/// rely upon that version's implementation of producing the current file content. This chain
/// terminates with the implementation for `CurrentFileContent`, which just returns itself.
fn to_current(self: Box<Self>, pwd: String) -> Result<Box<CurrentFileContent>, DecryptError>;
/// Provides the string that the file content should be written as
///
/// This method is provided -- instead of directly writing to the file -- so that no error
/// handling needs to be done within the implementation itself.
fn write(&self) -> String;
/// Sets the key, returning `Err` if it was invalid
fn set_key(&mut self, key: String) -> Result<(), DecryptError>;
/// Returns true if there have been changes made to the file without saving
///
/// Changes should be registered as unsaved until a call to `saved` is made
fn unsaved(&self) -> bool;
/// Registers any unsaved change as now being saved
fn mark_saved(&mut self);
/// Returns whether the entries have been decrypted -- true after `set_key` returns `Ok`
fn decrypted(&self) -> bool;
/// Produces the number of entries in the file
fn num_entries(&self) -> usize;
/// Produces the entry corresponding to the given index
fn entry(&self, idx: usize) -> Box<dyn EntryRef + '_>;
/// Produces a mutable reference to the corresponding entry
fn entry_mut(&mut self, idx: usize) -> Box<dyn EntryMut + '_>;
/// Gives access to the specified range of entries
///
/// The default implementation should suffice, but it *may* be possible for other
/// implementations to improve the performance. In practice, the performance should not matter
/// much.
fn entries_range(&self, range: Range<usize>) -> Vec<Box<dyn EntryRef + '_>> {
range.map(|i| self.entry(i)).collect()
}
/// Returns all of the entries
///
/// The default implementation here should suffice
fn all_entries(&self) -> Vec<Box<dyn EntryRef + '_>> {
let len = self.num_entries();
self.entries_range(0..len)
}
/// Adds an empty entry with the given name and returns its index
fn add_empty_entry(&mut self, name: String) -> usize;
/// Removes the entry at the given index | /// An immutable handle on an entry in the file
pub trait EntryRef {
/// Returns the title of the entry
fn name(&self) -> &str;
/// Returns all the tags associated with the entry
fn tags(&self) -> Vec<&str>;
/// Returns the date + time at which the entry was first added
fn first_added(&self) -> SystemTime;
/// Returns the date + time the entry was last updated
fn last_update(&self) -> SystemTime;
/// Returns a reference to the field with index `idx`
///
/// ## Panics
///
/// This function *should* panic if `idx` is greater than `self.num_fields()`
fn field(&self, idx: usize) -> Box<dyn FieldRef + '_>;
/// Returns the number of fields in the entry
fn num_fields(&self) -> usize;
}
/// A mutable handle on an entry in the file
pub trait EntryMut: EntryRef {
/// Sets the title of the entry
fn set_name(&mut self, name: String);
/// Sets the tags associated with the entry
fn set_tags(&mut self, tags: Vec<String>);
/// Returns a mutable reference to the field with index `idx`
///
/// ## Panics
///
/// This function *should* panic if `idx` is greater than `self.num_fields()`
fn field_mut(&mut self, idx: usize) -> Box<dyn FieldMut + '_>;
/// Creates a `FieldBuilder` that will (possibly) later be provided back
fn field_builder(&self) -> Box<dyn FieldBuilder>;
/// Sets the field at the given index, using the result of a previous call to
/// `self.field_builder()`
///
/// The index may be one greater than the current number of fields, in which case the value
/// should be appended.
fn set_field(
&mut self,
idx: usize,
builder: Box<dyn FieldBuilder>,
) -> Result<(), SetFieldError>;
/// Removes the given field
fn remove_field(&mut self, idx: usize);
}
/// An immutable handle on a single field of an entry
pub trait FieldRef {
/// The name of the field
fn name(&self) -> &str;
/// Returns the type of value inside this field
fn value_kind(&self) -> ValueKind;
/// The value of the field
///
/// For TOTP fields, this is expected to perform the necessary calculations and return the
/// current OTP.
fn value(&self) -> Result<String, GetValueError>;
/// Returns the "plaintext" value of the field
///
/// Unlike `value`, this returns the underlying secret for TOTP fields.
fn plaintext_value(&self) -> Result<PlaintextValue, GetValueError>;
}
/// A mutable handle on a single field of an entry
pub trait FieldMut: FieldRef {
/// Swaps the encryption of the field
fn swap_encryption(&mut self) -> Result<(), SwapEncryptionError>;
}
/// The types of values a field might have
#[derive(Debug, Copy, Clone)]
pub enum ValueKind {
Basic,
Protected,
Totp,
}
impl Display for ValueKind {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
ValueKind::Basic => f.write_str("Basic"),
ValueKind::Protected => f.write_str("Protected"),
ValueKind::Totp => f.write_str("TOTP"),
}
}
}
/// Helper type for constructing a field
pub trait FieldBuilder: Any {
/// Helper method to recover the original type
fn as_any_mut(&mut self) -> &mut dyn Any;
/// Converts the builder to build a "manual" field
fn make_manual(&mut self);
/// Converts the builder to build a TOTP field
fn make_totp(&mut self) -> Result<(), UnsupportedFeature>;
/// Sets the name of the field
fn set_name(&mut self, name: String);
/// Sets the value of the field
///
/// ## Panics
///
/// This method panics if there was no previous successful call to the matching `make_*` method
/// for the value (`make_manual` or `make_totp`).
fn set_value(&mut self, value: PlaintextValue);
}
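// Illustrative builder flow (assumed from the trait contracts above, not taken from a real caller):
//     let mut b = entry.field_builder();
//     b.make_manual();
//     b.set_name("username".to_string());
//     b.set_value(PlaintextValue::Manual { value: "alice".into(), protected: false });
//     entry.set_field(entry.num_fields(), b)?; // index == num_fields appends a new field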
/// The latest version of the file content -- the most recent implementor of [`FileContent`]
pub type CurrentFileContent = Keyed<v0_4::FileContent>;
/// A warning given after opening a file with a particular format version
pub struct Warning {
pub reason: &'static str,
}
/// Parses a `FileContent` from the provided file, exiting the program on failure
pub fn parse(file: &Path) -> (Box<dyn FileContent>, Option<Warning>) {
let content = match read_to_string(file) {
Ok(c) => c,
Err(e) => {
eprintln!("failed to read file {:?}: {}", file.to_string_lossy(), e);
exit(1);
}
};
macro_rules! prefix_match {
($val:expr => { $($str:literal => $arm:expr,)* _ => $else_arm:expr, }) => {{
| fn remove_entry(&mut self, idx: usize);
}
| random_line_split |
mod.rs | ) -> Result<Box<CurrentFileContent>, DecryptError>;
/// Provides the string that the file content should be written as
///
/// This method is provided -- instead of directly writing to the file -- so that no error
/// handling needs to be done within the implementation itself.
fn write(&self) -> String;
/// Sets the key, returning `Err` if it was invalid
fn set_key(&mut self, key: String) -> Result<(), DecryptError>;
/// Returns true if there have been changes made to the file without saving
///
/// Changes should be registered as unsaved until a call to `saved` is made
fn unsaved(&self) -> bool;
/// Registers any unsaved change as now being saved
fn mark_saved(&mut self);
/// Returns whether the entries have been decrypted -- true after `set_key` returns `Ok`
fn decrypted(&self) -> bool;
/// Produces the number of entries in the file
fn num_entries(&self) -> usize;
/// Produces the entry corresponding to the given index
fn entry(&self, idx: usize) -> Box<dyn EntryRef + '_>;
/// Produces a mutable reference to the corresponding entry
fn entry_mut(&mut self, idx: usize) -> Box<dyn EntryMut + '_>;
/// Gives access to the specified range of entries
///
/// The default implementation should suffice, but it *may* be possible for other
/// implementations to improve the performance. In practice, the performance should not matter
/// much.
fn entries_range(&self, range: Range<usize>) -> Vec<Box<dyn EntryRef + '_>> {
range.map(|i| self.entry(i)).collect()
}
/// Returns all of the entries
///
/// The default implementation here should suffice
fn all_entries(&self) -> Vec<Box<dyn EntryRef + '_>> {
let len = self.num_entries();
self.entries_range(0..len)
}
/// Adds an empty entry with the given name and returns its index
fn add_empty_entry(&mut self, name: String) -> usize;
/// Removes the entry at the given index
fn remove_entry(&mut self, idx: usize);
}
/// An immutable handle on an entry in the file
pub trait EntryRef {
/// Returns the title of the entry
fn name(&self) -> &str;
/// Returns all the tags associated with the entry
fn tags(&self) -> Vec<&str>;
/// Returns the date + time at which the entry was first added
fn first_added(&self) -> SystemTime;
/// Returns the date + time the entry was last updated
fn last_update(&self) -> SystemTime;
/// Returns a reference to the field with index `idx`
///
/// ## Panics
///
/// This function *should* panic if `idx` is greater than `self.num_fields()`
fn field(&self, idx: usize) -> Box<dyn FieldRef + '_>;
/// Returns the number of fields in the entry
fn num_fields(&self) -> usize;
}
/// A mutable handle on an entry in the file
pub trait EntryMut: EntryRef {
/// Sets the title of the entry
fn set_name(&mut self, name: String);
/// Sets the tags associated with the entry
fn set_tags(&mut self, tags: Vec<String>);
/// Returns a mutable reference to the field with index `idx`
///
/// ## Panics
///
/// This function *should* panic if `idx` is greater than `self.num_fields()`
fn field_mut(&mut self, idx: usize) -> Box<dyn FieldMut + '_>;
/// Creates a `FieldBuilder` that will (possibly) later be provided back
fn field_builder(&self) -> Box<dyn FieldBuilder>;
/// Sets the field at the given index, using the result of a previous call to
/// `self.field_builder()`
///
/// The index may be one greater than the current number of fields, in which case the value
/// should be appended.
fn set_field(
&mut self,
idx: usize,
builder: Box<dyn FieldBuilder>,
) -> Result<(), SetFieldError>;
/// Removes the given field
fn remove_field(&mut self, idx: usize);
}
/// An immutable handle on a single field of an entry
pub trait FieldRef {
/// The name of the field
fn name(&self) -> &str;
/// Returns the type of value inside this field
fn value_kind(&self) -> ValueKind;
/// The value of the field
///
/// For TOTP fields, this is expected to perform the necessary calculations and return the
/// current OTP.
fn value(&self) -> Result<String, GetValueError>;
/// Returns the "plaintext" value of the field
///
/// Unlike `value`, this returns the underlying secret for TOTP fields.
fn plaintext_value(&self) -> Result<PlaintextValue, GetValueError>;
}
/// A mutable handle on a single field of an entry
pub trait FieldMut: FieldRef {
/// Swaps the encryption of the field
fn swap_encryption(&mut self) -> Result<(), SwapEncryptionError>;
}
/// The types of values a field might have
#[derive(Debug, Copy, Clone)]
pub enum ValueKind {
Basic,
Protected,
Totp,
}
impl Display for ValueKind {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
ValueKind::Basic => f.write_str("Basic"),
ValueKind::Protected => f.write_str("Protected"),
ValueKind::Totp => f.write_str("TOTP"),
}
}
}
/// Helper type for constructing a field
pub trait FieldBuilder: Any {
/// Helper method to recover the original type
fn as_any_mut(&mut self) -> &mut dyn Any;
/// Converts the builder to build a "manual" field
fn make_manual(&mut self);
/// Converts the builder to build a TOTP field
fn make_totp(&mut self) -> Result<(), UnsupportedFeature>;
/// Sets the name of the field
fn set_name(&mut self, name: String);
/// Sets the value of the field
///
/// ## Panics
///
/// This method panics if there was no previous successful call to the matching `make_*` method
/// for the value (`make_manual` or `make_totp`).
fn set_value(&mut self, value: PlaintextValue);
}
/// The latest version of the file content -- the most recent implementor of [`FileContent`]
pub type CurrentFileContent = Keyed<v0_4::FileContent>;
/// A warning given after opening a file with a particular format version
pub struct Warning {
pub reason: &'static str,
}
/// Parses a `FileContent` from the provided file, exiting the program on failure
pub fn parse(file: &Path) -> (Box<dyn FileContent>, Option<Warning>) {
let content = match read_to_string(file) {
Ok(c) => c,
Err(e) => {
eprintln!("failed to read file {:?}: {}", file.to_string_lossy(), e);
exit(1);
}
};
macro_rules! prefix_match {
($val:expr => { $($str:literal => $arm:expr,)* _ => $else_arm:expr, }) => {{
let v = $val;
$(if v.starts_with($str) {
$arm
} else)* {
$else_arm
}
}};
}
prefix_match!(content.as_str() => {
"---\nversion: v0.2\n" => (Box::new(v0_2::parse(content)), v0_2::WARNING),
"---\nversion: v0.3\n" => (Box::new(v0_3::parse(content)), v0_3::WARNING),
"---\nversion: v0.4\n" => (Box::new(v0_4::parse(content)), v0_4::WARNING),
_ => {
eprintln!("unrecognized file version, should be one of: ['v0.2', 'v0.3', 'v0.4']");
exit(1)
},
})
}
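// For reference, the `prefix_match!` invocation above expands to an if/else-if chain
// roughly equivalent to this hand-expanded sketch:
//     let v = content.as_str();
//     if v.starts_with("---\nversion: v0.2\n") {
//         (Box::new(v0_2::parse(content)), v0_2::WARNING)
//     } else if v.starts_with("---\nversion: v0.3\n") {
//         (Box::new(v0_3::parse(content)), v0_3::WARNING)
//     } else if v.starts_with("---\nversion: v0.4\n") {
//         (Box::new(v0_4::parse(content)), v0_4::WARNING)
//     } else {
//         eprintln!("unrecognized file version, should be one of: ['v0.2', 'v0.3', 'v0.4']");
//         exit(1)
//     }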
/// Return type for [`CurrentFileContent::to_plaintext`]
///
/// This is used both to convert between `FileContent` versions *and* to within the
/// `emit-plaintext` and `from-plaintext` subcommands.
#[derive(Serialize, Deserialize)]
pub struct PlaintextContent {
last_update: SystemTime,
entries: Vec<PlaintextEntry>,
}
#[derive(Serialize, Deserialize)]
struct PlaintextEntry {
name: String,
tags: Vec<String>,
fields: Vec<PlaintextField>,
first_added: SystemTime,
last_update: SystemTime,
}
#[derive(Serialize, Deserialize)]
struct PlaintextField {
name: String,
value: PlaintextValue,
}
#[derive(Serialize, Deserialize)]
pub enum PlaintextValue {
Manual { value: String, protected: bool },
Totp { secret: String, issuer: String },
}
impl PlaintextContent {
/// Produces a new, empty `PlaintextContent` with the current time as its last update
fn init() -> Self | {
PlaintextContent {
last_update: SystemTime::now(),
entries: Vec::new(),
}
} | identifier_body |
|
mod.rs | , a warning to indicate as such
//! pub const WARNING: Option<Warning> = ...;
//!
//! // Given a file that we know starts with the correct version prefix, parse it
//! //
//! // If any errors are encountered, print them and exit.
//! pub fn parse(file_content: String) -> FileContent { ... }
//! ```
//! Those are used by the `parse` function at the bottom of this file.
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::fmt::{self, Display, Formatter};
use std::fs::read_to_string;
use std::ops::Range;
use std::path::Path;
use std::process::exit;
use std::time::SystemTime;
mod errors;
mod latest;
mod v0_2;
mod v0_3;
mod v0_4;
pub use errors::*;
/// Helper struct for file contents with an attached key
///
/// This is extracted into this module so that it's able to be used by multiple submodules without
/// redefinition.
pub struct Keyed<C> {
key: Option<Vec<u8>>,
unsaved: bool,
content: C,
}
impl<C> Keyed<C> {
/// Creates a new `Keyed` content without the key
fn new(content: C) -> Self {
Keyed {
key: None,
unsaved: false,
content,
}
}
}
/// A particular file format, kept object-safe so that we can switch on it at runtime
///
/// The first couple methods here are for providing the "meta" methods -- the rest merely provide
/// the shared facilities for interaction with contents of the file.
pub trait FileContent {
/// Helper function to convert to the latest version, given the user's password
///
/// It's customary for this method to only convert to the next version internally, and instead
/// rely upon that version's implementation of producing the current file content. This chain
/// terminates with the implementation for `CurrentFileContent`, which just returns itself.
fn to_current(self: Box<Self>, pwd: String) -> Result<Box<CurrentFileContent>, DecryptError>;
/// Provides the string that the file content should be written as
///
/// This method is provided -- instead of directly writing to the file -- so that no error
/// handling needs to be done within the implementation itself.
fn write(&self) -> String;
/// Sets the key, returning `Err` if it was invalid
fn set_key(&mut self, key: String) -> Result<(), DecryptError>;
/// Returns true if there have been changes made to the file without saving
///
/// Changes should be registered as unsaved until a call to `saved` is made
fn unsaved(&self) -> bool;
/// Registers any unsaved change as now being saved
fn mark_saved(&mut self);
/// Returns whether the entries have been decrypted -- true after `set_key` returns `Ok`
fn decrypted(&self) -> bool;
/// Produces the number of entries in the file
fn num_entries(&self) -> usize;
/// Produces the entry corresponding to the given index
fn entry(&self, idx: usize) -> Box<dyn EntryRef + '_>;
/// Produces a mutable reference to the corresponding entry
fn entry_mut(&mut self, idx: usize) -> Box<dyn EntryMut + '_>;
/// Gives access to the specified range of entries
///
/// The default implementation should suffice, but it *may* be possible for other
/// implementations to improve the performance. In practice, the performance should not matter
/// much.
fn entries_range(&self, range: Range<usize>) -> Vec<Box<dyn EntryRef + '_>> {
range.map(|i| self.entry(i)).collect()
}
/// Returns all of the entries
///
/// The default implementation here should suffice
fn all_entries(&self) -> Vec<Box<dyn EntryRef + '_>> {
let len = self.num_entries();
self.entries_range(0..len)
}
/// Adds an empty entry with the given name and returns its index
fn add_empty_entry(&mut self, name: String) -> usize;
/// Removes the entry at the given index
fn remove_entry(&mut self, idx: usize);
}
/// An immutable handle on an entry in the file
pub trait EntryRef {
/// Returns the title of the entry
fn name(&self) -> &str;
/// Returns all the tags associated with the entry
fn tags(&self) -> Vec<&str>;
/// Returns the date + time at which the
fn first_added(&self) -> SystemTime;
/// Returns the date + time the entry was last updated
fn last_update(&self) -> SystemTime;
/// Returns a reference to the field with index `idx`
///
/// ## Panics
///
/// This function *should* panic if `idx` is greater than `self.num_fields()`
fn field(&self, idx: usize) -> Box<dyn FieldRef + '_>;
/// Returns the number of fields in the entry
fn num_fields(&self) -> usize;
}
/// A mutable handle on an entry in the file
pub trait EntryMut: EntryRef {
/// Sets the title of the entry
fn set_name(&mut self, name: String);
/// Sets the tags associated with the entry
fn set_tags(&mut self, tags: Vec<String>);
/// Returns a mutable reference to the field with index `idx`
///
/// ## Panics
///
/// This function *should* panic if `idx` is greater than `self.num_fields()`
fn field_mut(&mut self, idx: usize) -> Box<dyn FieldMut + '_>;
/// Creates a `FieldBuilder` that will (possibly) later be provided back
fn field_builder(&self) -> Box<dyn FieldBuilder>;
/// Sets the field at the given index, using the result of a previous call to
/// `self.field_builder()`
///
/// The index may be one greater than the current number of fields, in which case the value
/// should be appended.
fn set_field(
&mut self,
idx: usize,
builder: Box<dyn FieldBuilder>,
) -> Result<(), SetFieldError>;
/// Removes the given field
fn remove_field(&mut self, idx: usize);
}
/// An immutable handle on a single field of an entry
pub trait FieldRef {
/// The name of the field
fn name(&self) -> &str;
/// Returns the type of value inside this field
fn value_kind(&self) -> ValueKind;
/// The value of the field
///
/// For TOTP fields, this is expected to perform the necessary calculations and return the
/// current OTP.
fn value(&self) -> Result<String, GetValueError>;
/// Returns the "plaintext" value of the field
///
/// Unlike `value`, this returns the underlying secret for TOTP fields.
fn plaintext_value(&self) -> Result<PlaintextValue, GetValueError>;
}
/// A mutable handle on a single field of an entry
pub trait FieldMut: FieldRef {
/// Swaps the encryption of the field
fn swap_encryption(&mut self) -> Result<(), SwapEncryptionError>;
}
/// The types of values a field might have
#[derive(Debug, Copy, Clone)]
pub enum ValueKind {
Basic,
Protected,
Totp,
}
impl Display for ValueKind {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
ValueKind::Basic => f.write_str("Basic"),
ValueKind::Protected => f.write_str("Protected"),
ValueKind::Totp => f.write_str("TOTP"),
}
}
}
/// Helper type for constructing a field
pub trait FieldBuilder: Any {
/// Helper method to recover the original type
fn as_any_mut(&mut self) -> &mut dyn Any;
/// Converts the builder to build a "manual" field
fn make_manual(&mut self);
/// Converts the builder to build a TOTP field
fn make_totp(&mut self) -> Result<(), UnsupportedFeature>;
/// Sets the name of the field
fn set_name(&mut self, name: String);
/// Sets the value of the field
///
/// ## Panics
///
/// This method panics if there was no previous successful call to the matching `make_*` method
/// for the value (`make_manual` or `make_totp`).
fn set_value(&mut self, value: PlaintextValue);
}
/// The latest version of the file content -- the most recent implementor of [`FileContent`]
pub type CurrentFileContent = Keyed<v0_4::FileContent>;
/// A warning given after opening a file with a particular format version
pub struct Warning {
pub reason: &'static str,
}
/// Parses a `FileContent` from the provided file, exiting the program on failure
pub fn parse(file: &Path) -> (Box<dyn FileContent>, Option<Warning>) {
let content = match read_to_string(file) {
Ok(c) => c,
Err(e) => |
};
macro_rules! prefix_match {
($val:expr => { $($str:literal => $arm:expr,)* _ => $else_arm:expr, | {
eprintln!("failed to read file {:?}: {}", file.to_string_lossy(), e);
exit(1);
} | conditional_block |
mod.rs | , a warning to indicate as such
//! pub const WARNING: Option<Warning> = ...;
//!
//! // Given a file that we know starts with the correct version prefix, parse it
//! //
//! // If any errors are encountered, print them and exit.
//! pub fn parse(file_content: String) -> FileContent { ... }
//! ```
//! Those are used by the `parse` function at the bottom of this file.
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::fmt::{self, Display, Formatter};
use std::fs::read_to_string;
use std::ops::Range;
use std::path::Path;
use std::process::exit;
use std::time::SystemTime;
mod errors;
mod latest;
mod v0_2;
mod v0_3;
mod v0_4;
pub use errors::*;
/// Helper struct for file contents with an attached key
///
/// This is extracted into this module so that it's able to be used by multiple submodules without
/// redefinition.
pub struct Keyed<C> {
key: Option<Vec<u8>>,
unsaved: bool,
content: C,
}
impl<C> Keyed<C> {
/// Creates a new `Keyed` content without the key
fn new(content: C) -> Self {
Keyed {
key: None,
unsaved: false,
content,
}
}
}
/// A particular file format, kept object-safe so that we can switch on it at runtime
///
/// The first couple methods here are for providing the "meta" methods -- the rest merely provide
/// the shared facilities for interaction with contents of the file.
pub trait FileContent {
/// Helper function to convert to the latest version, given the user's password
///
/// It's customary for this method to only convert to the next version internally, and instead
/// rely upon that version's implementation of producing the current file content. This chain
/// terminates with the implementation for `CurrentFileContent`, which just returns itself.
fn to_current(self: Box<Self>, pwd: String) -> Result<Box<CurrentFileContent>, DecryptError>;
/// Provides the string that the file content should be written as
///
/// This method is provided -- instead of directly writing to the file -- so that no error
/// handling needs to be done within the implementation itself.
fn write(&self) -> String;
/// Sets the key, returning `Err` if it was invalid
fn set_key(&mut self, key: String) -> Result<(), DecryptError>;
/// Returns true if there have been changes made to the file without saving
///
/// Changes should be registered as unsaved until a call to `saved` is made
fn unsaved(&self) -> bool;
/// Registers any unsaved change as now being saved
fn mark_saved(&mut self);
/// Returns whether the entries have been decrypted -- true after `set_key` returns `Ok`
fn decrypted(&self) -> bool;
/// Produces the number of entries in the file
fn num_entries(&self) -> usize;
/// Produces the entry corresponding to the given index
fn entry(&self, idx: usize) -> Box<dyn EntryRef + '_>;
/// Produces a mutable reference to the corresponding entry
fn entry_mut(&mut self, idx: usize) -> Box<dyn EntryMut + '_>;
/// Gives access to the specified range of entries
///
/// The default implementation should suffice, but it *may* be possible for other
/// implementations to improve the performance. In practice, the performance should not matter
/// much.
fn entries_range(&self, range: Range<usize>) -> Vec<Box<dyn EntryRef + '_>> {
range.map(|i| self.entry(i)).collect()
}
/// Returns all of the entries
///
/// The default implementation here should suffice
fn all_entries(&self) -> Vec<Box<dyn EntryRef + '_>> {
let len = self.num_entries();
self.entries_range(0..len)
}
/// Adds an empty entry with the given name and returns its index
fn add_empty_entry(&mut self, name: String) -> usize;
/// Removes the entry at the given index
fn remove_entry(&mut self, idx: usize);
}
/// An immutable handle on an entry in the file
pub trait EntryRef {
/// Returns the title of the entry
fn name(&self) -> &str;
/// Returns all the tags associated with the entry
fn tags(&self) -> Vec<&str>;
/// Returns the date + time at which the
fn first_added(&self) -> SystemTime;
/// Returns the date + time the entry was last updated
fn last_update(&self) -> SystemTime;
/// Returns a reference to the field with index `idx`
///
/// ## Panics
///
/// This function *should* panic if `idx` is greater than `self.num_fields()`
fn field(&self, idx: usize) -> Box<dyn FieldRef + '_>;
/// Returns the number of fields in the entry
fn num_fields(&self) -> usize;
}
/// A mutable handle on an entry in the file
pub trait EntryMut: EntryRef {
/// Sets the title of the entry
fn set_name(&mut self, name: String);
/// Sets the tags associated with the entry
fn set_tags(&mut self, tags: Vec<String>);
/// Returns a mutable reference to the field with index `idx`
///
/// ## Panics
///
/// This function *should* panic if `idx` is greater than `self.num_fields()`
fn field_mut(&mut self, idx: usize) -> Box<dyn FieldMut + '_>;
/// Creates a `FieldBuilder` that will (possibly) later be provided back
fn field_builder(&self) -> Box<dyn FieldBuilder>;
/// Sets the field at the given index, using the result of a previous call to
/// `self.field_builder()`
///
/// The index may be one greater than the current number of fields, in which case the value
/// should be appended.
fn set_field(
&mut self,
idx: usize,
builder: Box<dyn FieldBuilder>,
) -> Result<(), SetFieldError>;
/// Removes the given field
fn remove_field(&mut self, idx: usize);
}
/// An immutable handle on a single field of an entry
pub trait FieldRef {
/// The name of the field
fn name(&self) -> &str;
/// Returns the type of value inside this field
fn value_kind(&self) -> ValueKind;
/// The value of the field
///
/// For TOTP fields, this is expected to perform the necessary calculations and return the
/// current OTP.
fn value(&self) -> Result<String, GetValueError>;
/// Returns the "plaintext" value of the field
///
/// Unlike `value`, this returns the underlying secret for TOTP fields.
fn plaintext_value(&self) -> Result<PlaintextValue, GetValueError>;
}
/// A mutable handle on a single field of an entry
pub trait FieldMut: FieldRef {
/// Swaps the encryption of the field
fn swap_encryption(&mut self) -> Result<(), SwapEncryptionError>;
}
/// The types of values a field might have
#[derive(Debug, Copy, Clone)]
pub enum | {
Basic,
Protected,
Totp,
}
impl Display for ValueKind {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
ValueKind::Basic => f.write_str("Basic"),
ValueKind::Protected => f.write_str("Protected"),
ValueKind::Totp => f.write_str("TOTP"),
}
}
}
/// Helper type for constructing a field
pub trait FieldBuilder: Any {
/// Helper method to recover the original type
fn as_any_mut(&mut self) -> &mut dyn Any;
/// Converts the builder to build a "manual" field
fn make_manual(&mut self);
/// Converts the builder to build a TOTP field
fn make_totp(&mut self) -> Result<(), UnsupportedFeature>;
/// Sets the name of the field
fn set_name(&mut self, name: String);
/// Sets the value of the field
///
/// ## Panics
///
/// This method panics if there was no previous successful call to the matching `make_*` method
/// for the value (`make_manual` or `make_totp`).
fn set_value(&mut self, value: PlaintextValue);
}
/// The latest version of the file content -- the most recent implementor of [`FileContent`]
pub type CurrentFileContent = Keyed<v0_4::FileContent>;
/// A warning given after opening a file with a particular format version
pub struct Warning {
pub reason: &'static str,
}
/// Parses a `FileContent` from the provided file, exiting the program on failure
pub fn parse(file: &Path) -> (Box<dyn FileContent>, Option<Warning>) {
let content = match read_to_string(file) {
Ok(c) => c,
Err(e) => {
eprintln!("failed to read file {:?}: {}", file.to_string_lossy(), e);
exit(1);
}
};
macro_rules! prefix_match {
($val:expr => { $($str:literal => $arm:expr,)* _ => $else_arm:expr, }) | ValueKind | identifier_name |
draw.js | }
}
//we're going to split up the month by weeks! See what happens
let monthData = function(year, month) {
let censusArray = sealCensus[year][month];
let censuses = {
week1: [],
week2: [],
week3: [],
week4: []
};
//assign censuses to weeks
censusArray.forEach(census => {
let day = census.date.day;
let week = Math.ceil( day/7 );
switch(week) {
case 1:
censuses.week1.push(census);
break;
case 2:
censuses.week2.push(census);
break;
case 3:
censuses.week3.push(census);
break;
case 4:
censuses.week4.push(census);
break;
case 5:
//days 29-31 are folded into the last week
censuses.week4.push(census);
break;
}
});
return censuses;
}
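//Hypothetical usage of monthData (it is not called elsewhere in this file):
//let weeks = monthData(1980, 5);
//console.log(weeks.week1.length + " censuses fell in the first week of May 1980");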
//Add circles where the seals are!
// Create SVG element
let width = 1000;
let height = 500;
var addSVG = function() {
debug("in addSVG()");
let row = -1;
//create svg element
var svg = d3.select("#chart")
.append("svg")
.attr("width", width)
.attr("height", height)
.append("g")
.attr("transform", "translate(0,0)");
//get census data according to the slider's value/date
var sliderValue = document.getElementById("myRange").value;
//let dataYear = d3.select("#year").value;
var dataYear = 1976 + (sliderValue/12)|0;
var dataMonth = sliderValue%12 > 0 ? sliderValue%12 : 12;
var dataLocation;
var data;
var noCensus = false;
//var data = sealCensus[dataYear][dataMonth];
let censusArray = sealCensus[dataYear][dataMonth];
let census;
if(censusArray != undefined && censusArray != "undefined") {
census = censusArray[ Math.floor( Math.random() * censusArray.length ) ];
}
else {
census = []; //no recorded censuses for given month
noCensus = true;
}
if(!noCensus) {
dataLocation = generalLocation(census.location);
data = census.sealCensusLong;
}
else {
data = census;
}
data = fixData(data, dataLocation);
console.log("Extracted data for the force simulation: ");
console.log(data);
let xPadding = width/10;
let yPadding = height/10;
//data = [0,1,2,3,4,5,6,7,7,8,9,0,1,2,3,4,5,6,7,8,9,0,9,3,7,9];
//Create and append circle elements element
var circles = svg.selectAll(".seal")
.data(data)
.enter().append("circle")
.attr("class", "seal")
.attr("cx", (d, i) => (i%11) * xPadding)
//.attr("cx", 250)
//.attr("cy", 250)
.attr("cy", (d, i) => (((i/11)|0) + 1) * yPadding)
.attr("r", d => pickSize(d.type))
//.attr("fill", (sliderValue % 2 == 0) ? "blue" : "green");
.attr("fill", d => pickColor(d.type));
//actions for when the slider is updated
//help from http://bl.ocks.org/kbroman/2044064e9517985f2f25
d3.select("input[type=range]#myRange").on("input", function() {
dataYear = 1976 + (this.value/12)|0;
dataMonth = this.value%12 > 0 ? this.value%12 : 12;
console.log("Slider updated to " + dataMonth + " " + dataYear);
//var data = sealCensus[dataYear][dataMonth];
censusArray = sealCensus[dataYear][dataMonth];
if(censusArray != undefined && censusArray != "undefined") {
census = censusArray[ Math.floor( Math.random() * censusArray.length ) ];
noCensus = false;
}
else {
census = []; // no recorded censuses for given month
noCensus = true;
}
if(!noCensus) {
dataLocation = generalLocation(census.location);
data = census.sealCensusLong;
}
else
data = census;
data = fixData(data, dataLocation);
//console.log("New data being displayed: ");
//console.log(data);
//slightly redundant since this is done in autoplay as well
//BUT we also want this to happen when the user picks a year WITHOUT using the autoplayer
d3.select("output#year")
.text(convertSliderValue(this.value));
//update circles with the changes
//help from http://bl.ocks.org/alansmithy/e984477a741bc56db5a5
//let circles = d3.selectAll("circle.seal");
circles.data(data);//.join("circle")
circles.exit().remove(); //remove unneeded circles
circles.enter().append("circle")
.attr("r", d => pickSize(d.type))
.attr("cx", width/2)
.attr("cy", height/2)
.attr("fill", d => pickColor(d.type))
.attr("id", "new")
;//.merge(circles);
//debug("Slider changed to " + year);
//console.log(this);
simulation
.alphaTarget(0.25)
.restart();
simulation.nodes(data).on("tick", ticked);
});
//add text labels
//top left text
svg.append("text")
.style("text-anchor", "middle")
.attr("transform", "translate(" + width/3 + ", " + height/12 + ")")
.text("North Point");
//top right text
svg.append("text")
.style("text-anchor", "middle")
| .style("text-anchor", "middle")
.attr("transform", "translate(" + width/3 + ", " + height/2 + ")")
.text("Mid Bight Beach");
//bottom right text
svg.append("text")
.style("text-anchor", "middle")
.attr("transform", "translate(" + width*2/3 + ", " + height/2 + ")")
.text("Ano Point");
//legend container
svg.append("rect")
.attr("fill", "white")
.attr("x", width - 150)
.attr("y", 10)
.attr("height", 150)
.attr("width", 140);
//black is for pups
svg.append("rect")
.attr("stroke", "black")
.attr("fill", "#220C08")
.attr("x", width - 140)
.attr("y", 20)
.attr("height", 25)
.attr("width", 25);
svg.append("text")
.attr("fill", "black")
.attr("x", width - 105)
.attr("y", 37)
.text("Pup, Weanling");
//cream is for juveniles
svg.append("rect")
.attr("stroke", "black")
.attr("fill", "#EDE9DD")
.attr("x", width - 140)
.attr("y", 50)
.attr("height", 25)
.attr("width", 25);
svg.append("text")
.attr("fill", "black")
.attr("x", width - 105)
.attr("y", 67)
.text("Yearling, Juvenile");
//tan is for females
svg.append("rect")
.attr("stroke", "black")
.attr("fill", "#9B8576")
.attr("x", width - 140)
.attr("y", 80)
.attr("height", 25)
.attr("width", 25);
svg.append("text")
.attr("fill", "black")
.attr("x", width - 105)
.attr("y", 97)
| .attr("transform", "translate(" + width*2/3 + ", " + height/12 + ")")
.text("Bight Beach North");
//bottom left text
svg.append("text")
| random_line_split |
draw.js | dataLocation = generalLocation(census.location);
data = census.sealCensusLong;
}
else {
data = census;
}
data = fixData(data, dataLocation);
console.log("Extracted data for the force simulation: ");
console.log(data);
let xPadding = width/10;
let yPadding = height/10;
//data = [0,1,2,3,4,5,6,7,7,8,9,0,1,2,3,4,5,6,7,8,9,0,9,3,7,9];
//Create and append circle elements element
var circles = svg.selectAll(".seal")
.data(data)
.enter().append("circle")
.attr("class", "seal")
.attr("cx", (d, i) => (i%11) * xPadding)
//.attr("cx", 250)
//.attr("cy", 250)
.attr("cy", (d, i) => (((i/11)|0) + 1) * yPadding)
.attr("r", d => pickSize(d.type))
//.attr("fill", (sliderValue % 2 == 0) ? "blue" : "green");
.attr("fill", d => pickColor(d.type));
//actions for when the slider is updated
//help from http://bl.ocks.org/kbroman/2044064e9517985f2f25
d3.select("input[type=range]#myRange").on("input", function() {
dataYear = 1976 + (this.value/12)|0;
dataMonth = this.value%12 > 0 ? this.value%12 : 12;
console.log("Slider updated to " + dataMonth + " " + dataYear);
//var data = sealCensus[dataYear][dataMonth];
censusArray = sealCensus[dataYear][dataMonth];
if(censusArray != undefined && censusArray != "undefined") {
census = censusArray[ Math.floor( Math.random() * censusArray.length ) ];
noCensus = false;
}
else {
census = []; // no recorded censuses for given month
noCensus = true;
}
if(!noCensus) {
dataLocation = generalLocation(census.location);
data = census.sealCensusLong;
}
else
data = census;
data = fixData(data, dataLocation);
//console.log("New data being displayed: ");
//console.log(data);
//slightly redundant since this is done in autoplay as well
//BUT we also want this to happen when the user picks a year WITHOUT using the autoplayer
d3.select("output#year")
.text(convertSliderValue(this.value));
//update circles with the changes
//help from http://bl.ocks.org/alansmithy/e984477a741bc56db5a5
//let circles = d3.selectAll("circle.seal");
circles.data(data);//.join("circle")
circles.exit().remove(); //remove unneeded circles
circles.enter().append("circle")
.attr("r", d => pickSize(d.type))
.attr("cx", width/2)
.attr("cy", height/2)
.attr("fill", d => pickColor(d.type))
.attr("id", "new")
;//.merge(circles);
//debug("Slider changed to " + year);
//console.log(this);
simulation
.alphaTarget(0.25)
.restart();
simulation.nodes(data).on("tick", ticked);
});
//add text labels
//top left text
svg.append("text")
.style("text-anchor", "middle")
.attr("transform", "translate(" + width/3 + ", " + height/12 + ")")
.text("North Point");
//top right text
svg.append("text")
.style("text-anchor", "middle")
.attr("transform", "translate(" + width*2/3 + ", " + height/12 + ")")
.text("Bight Beach North");
//bottom left text
svg.append("text")
.style("text-anchor", "middle")
.attr("transform", "translate(" + width/3 + ", " + height/2 + ")")
.text("Mid Bight Beach");
//bottom right text
svg.append("text")
.style("text-anchor", "middle")
.attr("transform", "translate(" + width*2/3 + ", " + height/2 + ")")
.text("Ano Point");
//legend container
svg.append("rect")
.attr("fill", "white")
.attr("x", width - 150)
.attr("y", 10)
.attr("height", 150)
.attr("width", 140);
//black is for pups
svg.append("rect")
.attr("stroke", "black")
.attr("fill", "#220C08")
.attr("x", width - 140)
.attr("y", 20)
.attr("height", 25)
.attr("width", 25);
svg.append("text")
.attr("fill", "black")
.attr("x", width - 105)
.attr("y", 37)
.text("Pup, Weanling");
//cream is for juveniles
svg.append("rect")
.attr("stroke", "black")
.attr("fill", "#EDE9DD")
.attr("x", width - 140)
.attr("y", 50)
.attr("height", 25)
.attr("width", 25);
svg.append("text")
.attr("fill", "black")
.attr("x", width - 105)
.attr("y", 67)
.text("Yearling, Juvenile");
//tan is for females
svg.append("rect")
.attr("stroke", "black")
.attr("fill", "#9B8576")
.attr("x", width - 140)
.attr("y", 80)
.attr("height", 25)
.attr("width", 25);
svg.append("text")
.attr("fill", "black")
.attr("x", width - 105)
.attr("y", 97)
.text("Adult Female");
//pink/brown is for males
svg.append("rect")
.attr("stroke", "black")
.attr("fill", "#BC8D7D")
.attr("x", width - 140)
.attr("y", 110)
.attr("height", 25)
.attr("width", 25);
svg.append("text")
.attr("fill", "black")
.attr("x", width - 105)
.attr("y", 127)
.text("Adult Male");
//the simulation is a collection of forces
//about where we want our circles to go
//and how we want our circles to interact
simulation.nodes(data).on("tick", ticked);
function ticked() {
circles
.attr("cx", d => d.x)
.attr("cy", d => d.y)
}
}
// bubble cluster physics
// help from https://youtu.be/NTS7uXOxQeM
var forceX = d3.forceX( d => {
if(d.location == "North Point") { //North Point
return width*1/3;
}
else if(d.location == "Bight Beach North") { //Bight Beach North
return width*2/3;
}
else if(d.location == "Mid Bight Beach") { //Mid Bight Beach
return width*1/3;
}
else {//Other
return width*2/3;
}
})
.strength(0.1);
var forceY = d3.forceY( d => {
if(d.location == "North Point" || d.location == "Bight Beach North") {
return height/4;
}
else {//d.data[0].long > medianLong
return height*3/4;
}
})
.strength(0.1);
var simulation = d3.forceSimulation()
.force("x", forceX)
.force("y", forceY)
.force("collide", d3.forceCollide( d => pickSize(d.type) ));
//this function specifically exists to modify the data array for the force simulation
//Uncaught TypeError: Cannot create property 'vx' on string 'SA2'
//the force simulation ONLY accepts objects! So we turn the array of strings, into objects.
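//A minimal sketch of the conversion (assumed shape, not the actual implementation):
//each census code string such as "SA2" becomes an object like
//  { type: "SA2", location: dataLocation }
//so that d3-force can attach x/y/vx/vy properties to it.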
function | fixData | identifier_name |
|
draw.js | ensuses;
});
}
//Add circles where the seals are!
// Create SVG element
let width = 1000;
let height = 500;
var addSVG = function() {
debug("in addSVG()");
let row = -1;
//create svg element
var svg = d3.select("#chart")
.append("svg")
.attr("width", width)
.attr("height", height)
.append("g")
.attr("transform", "translate(0,0)");
//get census data according to the slider's value/date
var sliderValue = document.getElementById("myRange").value;
//let dataYear = d3.select("#year").value;
var dataYear = 1976 + (sliderValue/12)|0;
var dataMonth = sliderValue%12 > 0 ? sliderValue%12 : 12;
var dataLocation;
var data;
var noCensus = false;
//var data = sealCensus[dataYear][dataMonth];
let censusArray = sealCensus[dataYear][dataMonth];
let census;
if(censusArray != undefined && censusArray != "undefined") {
census = censusArray[ Math.floor( Math.random() * censusArray.length ) ];
}
else {
census = []; //no recorded censuses for given month
noCensus = true;
}
if(!noCensus) {
dataLocation = generalLocation(census.location);
data = census.sealCensusLong;
}
else {
data = census;
}
data = fixData(data, dataLocation);
console.log("Extracted data for the force simulation: ");
console.log(data);
let xPadding = width/10;
let yPadding = height/10;
//data = [0,1,2,3,4,5,6,7,7,8,9,0,1,2,3,4,5,6,7,8,9,0,9,3,7,9];
//Create and append circle elements element
var circles = svg.selectAll(".seal")
.data(data)
.enter().append("circle")
.attr("class", "seal")
.attr("cx", (d, i) => (i%11) * xPadding)
//.attr("cx", 250)
//.attr("cy", 250)
.attr("cy", (d, i) => (((i/11)|0) + 1) * yPadding)
.attr("r", d => pickSize(d.type))
//.attr("fill", (sliderValue % 2 == 0) ? "blue" : "green");
.attr("fill", d => pickColor(d.type));
//actions for when the slider is updated
//help from http://bl.ocks.org/kbroman/2044064e9517985f2f25
d3.select("input[type=range]#myRange").on("input", function() {
dataYear = 1976 + (this.value/12)|0;
dataMonth = this.value%12 > 0 ? this.value%12 : 12;
console.log("Slider updated to " + dataMonth + " " + dataYear);
//var data = sealCensus[dataYear][dataMonth];
censusArray = sealCensus[dataYear][dataMonth];
if(censusArray != undefined && censusArray != "undefined") {
census = censusArray[ Math.floor( Math.random() * censusArray.length ) ];
noCensus = false;
}
else {
census = []; // no recorded censuses for given month
noCensus = true;
}
if(!noCensus) {
dataLocation = generalLocation(census.location);
data = census.sealCensusLong;
}
else
data = census;
data = fixData(data, dataLocation);
//console.log("New data being displayed: ");
//console.log(data);
//slightly redundant since this is done in autoplay as well
//BUT we also want this to happen when the user picks a year WITHOUT using the autoplayer
d3.select("output#year")
.text(convertSliderValue(this.value));
//update circles with the changes
//help from http://bl.ocks.org/alansmithy/e984477a741bc56db5a5
//let circles = d3.selectAll("circle.seal");
circles.data(data);//.join("circle")
circles.exit().remove(); //remove unneeded circles
circles.enter().append("circle")
.attr("r", d => pickSize(d.type))
.attr("cx", width/2)
.attr("cy", height/2)
.attr("fill", d => pickColor(d.type))
.attr("id", "new")
;//.merge(circles);
//debug("Slider changed to " + year);
//console.log(this);
simulation
.alphaTarget(0.25)
.restart();
simulation.nodes(data).on("tick", ticked);
});
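//Editor's sketch (not from the original author): the handler above re-binds data with
//circles.data(data) but drops the returned update selection, so exit()/enter() act on a
//stale selection. A conventional d3 v4+ join would look roughly like the lines below;
//the "circle.seal" selector and the reuse of the `circles` variable are assumptions
//about the surrounding markup, so treat this as an illustration only:
//var joined = svg.selectAll("circle.seal").data(data);
//joined.exit().remove();
//circles = joined.enter().append("circle")
// .attr("class", "seal")
// .attr("r", d => pickSize(d.type))
// .attr("fill", d => pickColor(d.type))
// .merge(joined);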
//add text labels
//top left text
svg.append("text")
.style("text-anchor", "middle")
.attr("transform", "translate(" + width/3 + ", " + height/12 + ")")
.text("North Point");
//top right text
svg.append("text")
.style("text-anchor", "middle")
.attr("transform", "translate(" + width*2/3 + ", " + height/12 + ")")
.text("Bight Beach North");
//bottom left text
svg.append("text")
.style("text-anchor", "middle")
.attr("transform", "translate(" + width/3 + ", " + height/2 + ")")
.text("Mid Bight Beach");
//bottom right text
svg.append("text")
.style("text-anchor", "middle")
.attr("transform", "translate(" + width*2/3 + ", " + height/2 + ")")
.text("Ano Point");
//legend container
svg.append("rect")
.attr("fill", "white")
.attr("x", width - 150)
.attr("y", 10)
.attr("height", 150)
.attr("width", 140);
//black is for pups
svg.append("rect")
.attr("stroke", "black")
.attr("fill", "#220C08")
.attr("x", width - 140)
.attr("y", 20)
.attr("height", 25)
.attr("width", 25);
svg.append("text")
.attr("fill", "black")
.attr("x", width - 105)
.attr("y", 37)
.text("Pup, Weanling");
//cream is for juveniles
svg.append("rect")
.attr("stroke", "black")
.attr("fill", "#EDE9DD")
.attr("x", width - 140)
.attr("y", 50)
.attr("height", 25)
.attr("width", 25);
svg.append("text")
.attr("fill", "black")
.attr("x", width - 105)
.attr("y", 67)
.text("Yearling, Juvenile");
//tan is for females
svg.append("rect")
.attr("stroke", "black")
.attr("fill", "#9B8576")
.attr("x", width - 140)
.attr("y", 80)
.attr("height", 25)
.attr("width", 25);
svg.append("text")
.attr("fill", "black")
.attr("x", width - 105)
.attr("y", 97)
.text("Adult Female");
//pink/brown is for males
svg.append("rect")
.attr("stroke", "black")
.attr("fill", "#BC8D7D")
.attr("x", width - 140)
.attr("y", 110)
.attr("height", 25)
.attr("width", 25);
svg.append("text")
.attr("fill", "black")
.attr("x", width - 105)
.attr("y", 127)
.text("Adult Male");
//the simulation is a collection of forces
//about where we want our circles to go
//and how we want our circles to interact
simulation.nodes(data).on("tick", ticked);
function ticked() | {
circles
.attr("cx", d => d.x)
.attr("cy", d => d.y)
} | identifier_body |
|
draw.js | %12 > 0 ? sliderValue%12 : 12;
var dataLocation;
var data;
var noCensus = false;
//var data = sealCensus[dataYear][dataMonth];
let censusArray = sealCensus[dataYear][dataMonth];
let census;
if(censusArray != undefined && censusArray != "undefined") {
census = censusArray[ Math.floor( Math.random() * censusArray.length ) ];
}
else {
census = []; //no recorded censuses for given month
noCensus = true;
}
if(!noCensus) {
dataLocation = generalLocation(census.location);
data = census.sealCensusLong;
}
else {
data = census;
}
data = fixData(data, dataLocation);
console.log("Extracted data for the force simulation: ");
console.log(data);
let xPadding = width/10;
let yPadding = height/10;
//data = [0,1,2,3,4,5,6,7,7,8,9,0,1,2,3,4,5,6,7,8,9,0,9,3,7,9];
//Create and append circle elements element
var circles = svg.selectAll(".seal")
.data(data)
.enter().append("circle")
.attr("class", "seal")
.attr("cx", (d, i) => (i%11) * xPadding)
//.attr("cx", 250)
//.attr("cy", 250)
.attr("cy", (d, i) => (((i/11)|0) + 1) * yPadding)
.attr("r", d => pickSize(d.type))
//.attr("fill", (sliderValue % 2 == 0) ? "blue" : "green");
.attr("fill", d => pickColor(d.type));
//actions for when the slider is updated
//help from http://bl.ocks.org/kbroman/2044064e9517985f2f25
d3.select("input[type=range]#myRange").on("input", function() {
dataYear = 1976 + (this.value/12)|0;
dataMonth = this.value%12 > 0 ? this.value%12 : 12;
console.log("Slider updated to " + dataMonth + " " + dataYear);
//var data = sealCensus[dataYear][dataMonth];
censusArray = sealCensus[dataYear][dataMonth];
if(censusArray != undefined && censusArray != "undefined") {
census = censusArray[ Math.floor( Math.random() * censusArray.length ) ];
noCensus = false;
}
else {
census = []; // no recorded censuses for given month
noCensus = true;
}
if(!noCensus) {
dataLocation = generalLocation(census.location);
data = census.sealCensusLong;
}
else
data = census;
data = fixData(data, dataLocation);
//console.log("New data being displayed: ");
//console.log(data);
//slightly redundant since this is done in autoplay as well
//BUT we also want this to happen when the user picks a year WITHOUT using the autoplayer
d3.select("output#year")
.text(convertSliderValue(this.value));
//update circles with the changes
//help from http://bl.ocks.org/alansmithy/e984477a741bc56db5a5
//let circles = d3.selectAll("circle.seal");
circles.data(data);//.join("circle")
circles.exit().remove(); //remove unneeded circles
circles.enter().append("circle")
.attr("r", d => pickSize(d.type))
.attr("cx", width/2)
.attr("cy", height/2)
.attr("fill", d => pickColor(d.type))
.attr("id", "new")
;//.merge(circles);
//debug("Slider changed to " + year);
//console.log(this);
simulation
.alphaTarget(0.25)
.restart();
simulation.nodes(data).on("tick", ticked);
});
//add text labels
//top left text
svg.append("text")
.style("text-anchor", "middle")
.attr("transform", "translate(" + width/3 + ", " + height/12 + ")")
.text("North Point");
//top right text
svg.append("text")
.style("text-anchor", "middle")
.attr("transform", "translate(" + width*2/3 + ", " + height/12 + ")")
.text("Bight Beach North");
//bottom left text
svg.append("text")
.style("text-anchor", "middle")
.attr("transform", "translate(" + width/3 + ", " + height/2 + ")")
.text("Mid Bight Beach");
//bottom right text
svg.append("text")
.style("text-anchor", "middle")
.attr("transform", "translate(" + width*2/3 + ", " + height/2 + ")")
.text("Ano Point");
//legend container
svg.append("rect")
.attr("fill", "white")
.attr("x", width - 150)
.attr("y", 10)
.attr("height", 150)
.attr("width", 140);
//black is for pups
svg.append("rect")
.attr("stroke", "black")
.attr("fill", "#220C08")
.attr("x", width - 140)
.attr("y", 20)
.attr("height", 25)
.attr("width", 25);
svg.append("text")
.attr("fill", "black")
.attr("x", width - 105)
.attr("y", 37)
.text("Pup, Weanling");
//cream is for juveniles
svg.append("rect")
.attr("stroke", "black")
.attr("fill", "#EDE9DD")
.attr("x", width - 140)
.attr("y", 50)
.attr("height", 25)
.attr("width", 25);
svg.append("text")
.attr("fill", "black")
.attr("x", width - 105)
.attr("y", 67)
.text("Yearling, Juvenile");
//tan is for females
svg.append("rect")
.attr("stroke", "black")
.attr("fill", "#9B8576")
.attr("x", width - 140)
.attr("y", 80)
.attr("height", 25)
.attr("width", 25);
svg.append("text")
.attr("fill", "black")
.attr("x", width - 105)
.attr("y", 97)
.text("Adult Female");
//pink/brown is for males
svg.append("rect")
.attr("stroke", "black")
.attr("fill", "#BC8D7D")
.attr("x", width - 140)
.attr("y", 110)
.attr("height", 25)
.attr("width", 25);
svg.append("text")
.attr("fill", "black")
.attr("x", width - 105)
.attr("y", 127)
.text("Adult Male");
//the simulation is a collection of forces
//about where we want our circles to go
//and how we want our circles to interact
simulation.nodes(data).on("tick", ticked);
function ticked() {
circles
.attr("cx", d => d.x)
.attr("cy", d => d.y)
}
}
// bubble cluster physics
// help from https://youtu.be/NTS7uXOxQeM
var forceX = d3.forceX( d => {
if(d.location == "North Point") { //North Point
return width*1/3;
}
else if(d.location == "Bight Beach North") { //Bight Beach North
return width*2/3;
}
else if(d.location == "Mid Bight Beach") { //Mid Bight Beach
return width*1/3;
}
else {//Other
return width*2/3;
}
})
.strength(0.1);
var forceY = d3.forceY( d => {
if(d.location == "North Point" || d.location == "Bight Beach North") | {
return height/4;
} | conditional_block |
|
main.rs | ::with_name("find")
.short("f")
.long("find")
.value_name("METHOD")
.default_value("any")
.possible_values(&["any", "all"])
.help("Whether to find *all* paths for each graph or *any* path for each graph"))
.get_matches();
let start_time = PreciseTime::now();
let start: usize = matches.value_of("start").unwrap().parse().expect("Could not parse start value");
let limit: usize = matches.value_of("end").unwrap().parse().expect("Could not parse end value");
let method = match matches.value_of("find").unwrap() {
"any" => Method::Any,
"all" => Method::All,
_ => panic!(),
};
let mut g = init_square_sum_path(limit);
let s: Vec<usize> = squares().take_while(|&x| x <= (limit * 2) - 1).collect();
// Prime the graph up to the start of the search
for _ in 1..start {
add_square_sum_node(&mut g, &s);
}
let mut ham = None; // Cache for previous loop's path
match method {
Method::All => {
for _ in start..limit {
add_square_sum_node(&mut g, &s);
let paths = find_all_paths(&g);
if !paths.is_empty() {
let next_num = g.node_count() + 1;
let relevant_squares: Vec<_> = squares()
.skip_while(|&sq| sq <= next_num)
.take_while(|&sq| sq <= (next_num * 2) - 1)
.collect();
let magic_paths: Vec<_> = paths
.iter()
.filter(|&p| {
relevant_squares
.iter()
.any(|sq| *p.first().unwrap() == sq - next_num || *p.last().unwrap() == sq - next_num)
})
.collect();
if magic_paths.is_empty() {
println!("{} has no magic paths", g.node_count());
} else {
println!("{} has {} magic paths", g.node_count(), magic_paths.len());
}
}
}
},
Method::Any => {
for _ in start..limit {
add_square_sum_node(&mut g, &s);
ham = find_any_path(&g, ham);
}
}
}
let end_time = PreciseTime::now();
println!("{} seconds.", start_time.to(end_time).num_seconds());
}
fn find_any_path<N, E, Ty>(
g: &petgraph::Graph<N, E, Ty, usize>,
ham: Option<Vec<usize>>,
) -> Option<Vec<usize>>
where
Ty: petgraph::EdgeType,
{
match find_hamiltonian(g, ham) {
Ok(h) => Some(h),
Err(e) => {
println!("{} fails with {}", g.node_count(), e);
None
}
}
}
fn find_all_paths<N, E, Ty>(g: &petgraph::Graph<N, E, Ty, usize>) -> HashSet<std::vec::Vec<usize>>
where
Ty: petgraph::EdgeType,
{
let mut tries = 0;
let mut failed_tries = 0;
let mut paths = HashSet::new();
loop {
tries += 1;
let ham = match find_hamiltonian(g, None) {
Ok(h) => Some(h),
Err(_) => None,
};
if let Some(mut p) = ham.clone() {
if p.first().unwrap() > p.last().unwrap() {
p.reverse();
}
if paths.insert(p) {
failed_tries = 0;
} else {
failed_tries += 1;
}
} else {
failed_tries += 1;
}
if failed_tries > max(3, (tries as f32 * 0.7) as usize) {
break;
}
}
println!(
"{} has {} paths from {} tries",
g.node_count(),
paths.len(),
tries
);
paths
}
fn integers() -> std::ops::Range<usize> {
1..usize::max_value()
}
fn squares() -> std::iter::Map<std::ops::Range<usize>, fn(usize) -> usize> {
integers().map(|x| x * x)
}
fn init_square_sum_path(n: usize) -> petgraph::Graph<(), (), petgraph::Undirected, usize> {
let num_edges: usize = integers()
.take(n)
.map(|i| {
f64::floor(f64::sqrt(((i * 2) - 1) as f64)) as usize
- f64::floor(f64::sqrt(i as f64)) as usize
})
.sum();
petgraph::Graph::with_capacity(n, num_edges)
}
fn | (
g: &mut petgraph::Graph<(), (), petgraph::Undirected, usize>,
square_numbers: &[usize],
) {
let i = g.node_count() + 1;
g.add_node(());
for sq in square_numbers
.iter()
.skip_while(|&sq| sq <= &i)
.take_while(|&sq| sq <= &((i * 2) - 1))
{
let i_index = petgraph::graph::node_index(i - 1);
let j_index = petgraph::graph::node_index(sq - i - 1);
g.update_edge(i_index, j_index, ());
}
}
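// Editor's sketch (not part of the original source): a minimal check of the square-sum
// adjacency rule built by the two helpers above. Node k stores the integer k + 1, and an
// edge exists between a and b exactly when a + b is a perfect square. The bound of 15
// and the asserted pairs are illustrative choices, not values from the original program.
#[cfg(test)]
mod square_sum_adjacency_tests {
    use super::*;

    #[test]
    fn one_links_to_three_and_eight() {
        let n = 15;
        let mut g = init_square_sum_path(n);
        let s: Vec<usize> = squares().take_while(|&x| x <= (n * 2) - 1).collect();
        for _ in 0..n {
            add_square_sum_node(&mut g, &s);
        }
        // 1 + 3 = 4 and 1 + 8 = 9 are squares, while 1 + 4 = 5 is not.
        assert!(g.contains_edge(petgraph::graph::node_index(0), petgraph::graph::node_index(2)));
        assert!(g.contains_edge(petgraph::graph::node_index(0), petgraph::graph::node_index(7)));
        assert!(!g.contains_edge(petgraph::graph::node_index(0), petgraph::graph::node_index(3)));
    }
}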
struct Path {
path: Vec<usize>,
member: Vec<bool>,
}
impl Path {
fn new(size: usize) -> Path {
Path {
path: Vec::with_capacity(size),
member: vec![false; size],
}
}
fn from_seed(seed: &[usize], size: usize) -> Path {
// TODO check that size >= seed.len()
let mut path = Vec::with_capacity(size);
let mut member = vec![false; size];
for i in seed.iter() {
path.push(i - 1);
member[*i - 1] = true;
}
Path { path, member }
}
fn push(&mut self, node_index: usize) {
self.path.push(node_index);
self.member[node_index] = true;
}
fn len(&self) -> usize {
self.path.len()
}
fn contains(&self, node_index: usize) -> bool {
self.member[node_index]
}
fn backtrack(&mut self, amount: usize) {
let actual_backtrack_amount = min(amount, self.path.len() - 2);
for i in &self.path[(self.path.len() - actual_backtrack_amount)..] {
self.member[*i] = false;
}
let new_size = self.path.len() - actual_backtrack_amount;
self.path.truncate(new_size);
}
fn reverse(&mut self) {
self.path.reverse();
}
fn iter(&self) -> std::slice::Iter<usize> {
self.path.iter()
}
}
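// Editor's sketch (not part of the original source): Path::backtrack clamps the amount to
// len - 2, so the first two nodes of a path always survive a backtrack. The seed values
// and sizes below are illustrative only.
#[cfg(test)]
mod path_backtrack_tests {
    use super::*;

    #[test]
    fn backtrack_keeps_first_two_nodes() {
        let mut p = Path::from_seed(&[1, 2, 3, 4, 5], 10);
        p.backtrack(100); // larger than the path, clamped to len - 2
        assert_eq!(p.len(), 2);
        assert!(p.contains(0) && p.contains(1));
        assert!(!p.contains(4));
    }
}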
fn setup_path<N, E, Ty>(g: &petgraph::Graph<N, E, Ty, usize>) -> Result<Path, &'static str>
where
Ty: petgraph::EdgeType,
{
let mut rng = rand::thread_rng();
let start = petgraph::graph::node_index(rng.gen_range(0, g.node_count()));
let neighbours = g.neighbors(start).collect::<Vec<_>>();
let next = rng.choose(&neighbours).ok_or("Node had no neighbours!")?;
let mut path = Path::new(g.node_count());
path.push(start.index());
path.push(next.index());
Ok(path)
}
fn find_hamiltonian<N, E, Ty>(
g: &petgraph::Graph<N, E, Ty, usize>,
seed: Option<Vec<usize>>,
) -> Result<Vec<usize>, &'static str>
where
Ty: petgraph::EdgeType,
{
if petgraph::algo::connected_components(&g) != 1 {
return Err("Not a fully-connected graph");
}
let reverse_rate = max(100, g.node_count() / 1000);
let backtrack_rate = max(1000, g.node_count() / 100);
let backtrack_amount = max(5, g.node_count() / 10_000);
let reset_rate = g.node_count() * 10; // Must be larger than num nodes
let max_iterations = reset_rate * 5;
let mut rng = rand::thread_rng();
let mut path = match seed {
Some(s) => Path::from_seed(&s, g.node_count()),
None => setup_path(g)?,
};
let mut longest_path: Vec<usize> = Vec::with_capacity(g.node_count());
let mut iteration = 0;
let mut resets = 0;
loop {
// Reverse the path often
if iteration % reverse_rate == 0 {
path.reverse();
}
// Reset the search occasionally
if iteration > reset_rate {
iteration = 1;
resets += 1;
path = setup_path(g)?;
continue;
}
// Backtrack a smidge now and again
if iteration % backtrack_rate == | add_square_sum_node | identifier_name |
main.rs | ::with_name("find")
.short("f")
.long("find")
.value_name("METHOD")
.default_value("any")
.possible_values(&["any", "all"])
.help("Whether to find *all* paths for each graph or *any* path for each graph"))
.get_matches();
let start_time = PreciseTime::now();
let start: usize = matches.value_of("start").unwrap().parse().expect("Could not parse start value");
let limit: usize = matches.value_of("end").unwrap().parse().expect("Could not parse end value");
let method = match matches.value_of("find").unwrap() {
"any" => Method::Any,
"all" => Method::All,
_ => panic!(),
};
let mut g = init_square_sum_path(limit);
let s: Vec<usize> = squares().take_while(|&x| x <= (limit * 2) - 1).collect();
// Prime the graph up to the start of the search
for _ in 1..start {
add_square_sum_node(&mut g, &s);
}
let mut ham = None; // Cache for previous loop's path
match method {
Method::All => {
for _ in start..limit {
add_square_sum_node(&mut g, &s);
let paths = find_all_paths(&g);
if !paths.is_empty() {
let next_num = g.node_count() + 1;
let relevant_squares: Vec<_> = squares()
.skip_while(|&sq| sq <= next_num)
.take_while(|&sq| sq <= (next_num * 2) - 1)
.collect();
let magic_paths: Vec<_> = paths
.iter()
.filter(|&p| {
relevant_squares
.iter()
.any(|sq| *p.first().unwrap() == sq - next_num || *p.last().unwrap() == sq - next_num)
})
.collect();
if magic_paths.is_empty() {
println!("{} has no magic paths", g.node_count());
} else {
println!("{} has {} magic paths", g.node_count(), magic_paths.len());
}
}
}
},
Method::Any => {
for _ in start..limit {
add_square_sum_node(&mut g, &s);
ham = find_any_path(&g, ham);
}
}
}
let end_time = PreciseTime::now();
println!("{} seconds.", start_time.to(end_time).num_seconds());
}
fn find_any_path<N, E, Ty>(
g: &petgraph::Graph<N, E, Ty, usize>,
ham: Option<Vec<usize>>,
) -> Option<Vec<usize>>
where
Ty: petgraph::EdgeType,
{
match find_hamiltonian(g, ham) {
Ok(h) => Some(h),
Err(e) => {
println!("{} fails with {}", g.node_count(), e);
None
}
}
}
fn find_all_paths<N, E, Ty>(g: &petgraph::Graph<N, E, Ty, usize>) -> HashSet<std::vec::Vec<usize>>
where
Ty: petgraph::EdgeType,
{
let mut tries = 0;
let mut failed_tries = 0;
let mut paths = HashSet::new();
loop {
tries += 1;
let ham = match find_hamiltonian(g, None) {
Ok(h) => Some(h),
Err(_) => None,
};
if let Some(mut p) = ham.clone() {
if p.first().unwrap() > p.last().unwrap() {
p.reverse();
}
if paths.insert(p) {
failed_tries = 0;
} else {
failed_tries += 1;
}
} else {
failed_tries += 1;
}
if failed_tries > max(3, (tries as f32 * 0.7) as usize) {
break;
}
}
println!(
"{} has {} paths from {} tries",
g.node_count(),
paths.len(),
tries
);
paths
}
fn integers() -> std::ops::Range<usize> {
1..usize::max_value()
}
fn squares() -> std::iter::Map<std::ops::Range<usize>, fn(usize) -> usize> {
integers().map(|x| x * x)
}
fn init_square_sum_path(n: usize) -> petgraph::Graph<(), (), petgraph::Undirected, usize> {
let num_edges: usize = integers()
.take(n)
.map(|i| {
f64::floor(f64::sqrt(((i * 2) - 1) as f64)) as usize
- f64::floor(f64::sqrt(i as f64)) as usize
})
.sum();
petgraph::Graph::with_capacity(n, num_edges)
}
fn add_square_sum_node(
g: &mut petgraph::Graph<(), (), petgraph::Undirected, usize>,
square_numbers: &[usize],
) {
let i = g.node_count() + 1;
g.add_node(());
for sq in square_numbers
.iter()
.skip_while(|&sq| sq <= &i)
.take_while(|&sq| sq <= &((i * 2) - 1))
{
let i_index = petgraph::graph::node_index(i - 1);
let j_index = petgraph::graph::node_index(sq - i - 1);
g.update_edge(i_index, j_index, ());
}
}
struct Path {
path: Vec<usize>,
member: Vec<bool>,
}
impl Path {
fn new(size: usize) -> Path {
Path {
path: Vec::with_capacity(size),
member: vec![false; size],
}
}
fn from_seed(seed: &[usize], size: usize) -> Path |
fn push(&mut self, node_index: usize) {
self.path.push(node_index);
self.member[node_index] = true;
}
fn len(&self) -> usize {
self.path.len()
}
fn contains(&self, node_index: usize) -> bool {
self.member[node_index]
}
fn backtrack(&mut self, amount: usize) {
let actual_backtrack_amount = min(amount, self.path.len() - 2);
for i in &self.path[(self.path.len() - actual_backtrack_amount)..] {
self.member[*i] = false;
}
let new_size = self.path.len() - actual_backtrack_amount;
self.path.truncate(new_size);
}
fn reverse(&mut self) {
self.path.reverse();
}
fn iter(&self) -> std::slice::Iter<usize> {
self.path.iter()
}
}
fn setup_path<N, E, Ty>(g: &petgraph::Graph<N, E, Ty, usize>) -> Result<Path, &'static str>
where
Ty: petgraph::EdgeType,
{
let mut rng = rand::thread_rng();
let start = petgraph::graph::node_index(rng.gen_range(0, g.node_count()));
let neighbours = g.neighbors(start).collect::<Vec<_>>();
let next = rng.choose(&neighbours).ok_or("Node had no neighbours!")?;
let mut path = Path::new(g.node_count());
path.push(start.index());
path.push(next.index());
Ok(path)
}
fn find_hamiltonian<N, E, Ty>(
g: &petgraph::Graph<N, E, Ty, usize>,
seed: Option<Vec<usize>>,
) -> Result<Vec<usize>, &'static str>
where
Ty: petgraph::EdgeType,
{
if petgraph::algo::connected_components(&g) != 1 {
return Err("Not a fully-connected graph");
}
let reverse_rate = max(100, g.node_count() / 1000);
let backtrack_rate = max(1000, g.node_count() / 100);
let backtrack_amount = max(5, g.node_count() / 10_000);
let reset_rate = g.node_count() * 10; // Must be larger than num nodes
let max_iterations = reset_rate * 5;
let mut rng = rand::thread_rng();
let mut path = match seed {
Some(s) => Path::from_seed(&s, g.node_count()),
None => setup_path(g)?,
};
let mut longest_path: Vec<usize> = Vec::with_capacity(g.node_count());
let mut iteration = 0;
let mut resets = 0;
loop {
// Reverse the path often
if iteration % reverse_rate == 0 {
path.reverse();
}
// Reset the search occasionally
if iteration > reset_rate {
iteration = 1;
resets += 1;
path = setup_path(g)?;
continue;
}
// Backtrack a smidge now and again
if iteration % backtrack_rate == | {
// TODO check that size >= seed.len()
let mut path = Vec::with_capacity(size);
let mut member = vec![false; size];
for i in seed.iter() {
path.push(i - 1);
member[*i - 1] = true;
}
Path { path, member }
} | identifier_body |
main.rs | ::with_name("find")
.short("f")
.long("find")
.value_name("METHOD")
.default_value("any")
.possible_values(&["any", "all"])
.help("Whether to find *all* paths for each graph or *any* path for each graph"))
.get_matches();
let start_time = PreciseTime::now();
let start: usize = matches.value_of("start").unwrap().parse().expect("Could not parse start value");
let limit: usize = matches.value_of("end").unwrap().parse().expect("Could not parse end value");
let method = match matches.value_of("find").unwrap() {
"any" => Method::Any,
"all" => Method::All,
_ => panic!(),
};
let mut g = init_square_sum_path(limit);
let s: Vec<usize> = squares().take_while(|&x| x <= (limit * 2) - 1).collect();
// Prime the graph up to the start of the search
for _ in 1..start {
add_square_sum_node(&mut g, &s);
}
let mut ham = None; // Cache for previous loop's path
match method {
Method::All => {
for _ in start..limit {
add_square_sum_node(&mut g, &s);
let paths = find_all_paths(&g);
if !paths.is_empty() {
let next_num = g.node_count() + 1;
let relevant_squares: Vec<_> = squares()
.skip_while(|&sq| sq <= next_num)
.take_while(|&sq| sq <= (next_num * 2) - 1)
.collect();
let magic_paths: Vec<_> = paths
.iter()
.filter(|&p| {
relevant_squares
.iter()
.any(|sq| *p.first().unwrap() == sq - next_num || *p.last().unwrap() == sq - next_num)
})
.collect();
if magic_paths.is_empty() {
println!("{} has no magic paths", g.node_count());
} else {
println!("{} has {} magic paths", g.node_count(), magic_paths.len());
}
}
}
},
Method::Any => {
for _ in start..limit {
add_square_sum_node(&mut g, &s);
ham = find_any_path(&g, ham);
}
}
}
let end_time = PreciseTime::now();
println!("{} seconds.", start_time.to(end_time).num_seconds());
}
fn find_any_path<N, E, Ty>(
g: &petgraph::Graph<N, E, Ty, usize>,
ham: Option<Vec<usize>>,
) -> Option<Vec<usize>>
where
Ty: petgraph::EdgeType,
{
match find_hamiltonian(g, ham) {
Ok(h) => Some(h),
Err(e) => {
println!("{} fails with {}", g.node_count(), e);
None
}
}
}
fn find_all_paths<N, E, Ty>(g: &petgraph::Graph<N, E, Ty, usize>) -> HashSet<std::vec::Vec<usize>>
where
Ty: petgraph::EdgeType,
{
let mut tries = 0;
let mut failed_tries = 0;
let mut paths = HashSet::new();
loop {
tries += 1;
let ham = match find_hamiltonian(g, None) {
Ok(h) => Some(h),
Err(_) => None,
};
if let Some(mut p) = ham.clone() {
if p.first().unwrap() > p.last().unwrap() {
p.reverse();
}
if paths.insert(p) {
failed_tries = 0;
} else {
failed_tries += 1;
}
} else {
failed_tries += 1;
}
if failed_tries > max(3, (tries as f32 * 0.7) as usize) {
break;
}
}
println!(
"{} has {} paths from {} tries",
g.node_count(),
paths.len(),
tries
);
paths
}
fn integers() -> std::ops::Range<usize> {
1..usize::max_value()
}
fn squares() -> std::iter::Map<std::ops::Range<usize>, fn(usize) -> usize> {
integers().map(|x| x * x)
}
fn init_square_sum_path(n: usize) -> petgraph::Graph<(), (), petgraph::Undirected, usize> {
let num_edges: usize = integers()
.take(n)
.map(|i| {
f64::floor(f64::sqrt(((i * 2) - 1) as f64)) as usize
- f64::floor(f64::sqrt(i as f64)) as usize
})
.sum();
petgraph::Graph::with_capacity(n, num_edges)
}
fn add_square_sum_node(
g: &mut petgraph::Graph<(), (), petgraph::Undirected, usize>,
square_numbers: &[usize],
) {
let i = g.node_count() + 1;
g.add_node(());
for sq in square_numbers
.iter()
.skip_while(|&sq| sq <= &i)
.take_while(|&sq| sq <= &((i * 2) - 1))
{
let i_index = petgraph::graph::node_index(i - 1);
let j_index = petgraph::graph::node_index(sq - i - 1);
g.update_edge(i_index, j_index, ());
}
}
struct Path {
path: Vec<usize>,
member: Vec<bool>,
}
impl Path {
fn new(size: usize) -> Path {
Path {
path: Vec::with_capacity(size),
member: vec![false; size],
}
}
fn from_seed(seed: &[usize], size: usize) -> Path {
// TODO check that size >= seed.len()
let mut path = Vec::with_capacity(size);
let mut member = vec![false; size];
for i in seed.iter() {
path.push(i - 1);
member[*i - 1] = true;
}
Path { path, member }
}
fn push(&mut self, node_index: usize) {
self.path.push(node_index);
self.member[node_index] = true;
}
fn len(&self) -> usize {
self.path.len()
}
fn contains(&self, node_index: usize) -> bool {
self.member[node_index]
}
fn backtrack(&mut self, amount: usize) {
let actual_backtrack_amount = min(amount, self.path.len() - 2);
for i in &self.path[(self.path.len() - actual_backtrack_amount)..] {
self.member[*i] = false;
}
let new_size = self.path.len() - actual_backtrack_amount; | fn reverse(&mut self) {
self.path.reverse();
}
fn iter(&self) -> std::slice::Iter<usize> {
self.path.iter()
}
}
fn setup_path<N, E, Ty>(g: &petgraph::Graph<N, E, Ty, usize>) -> Result<Path, &'static str>
where
Ty: petgraph::EdgeType,
{
let mut rng = rand::thread_rng();
let start = petgraph::graph::node_index(rng.gen_range(0, g.node_count()));
let neighbours = g.neighbors(start).collect::<Vec<_>>();
let next = rng.choose(&neighbours).ok_or("Node had no neighbours!")?;
let mut path = Path::new(g.node_count());
path.push(start.index());
path.push(next.index());
Ok(path)
}
fn find_hamiltonian<N, E, Ty>(
g: &petgraph::Graph<N, E, Ty, usize>,
seed: Option<Vec<usize>>,
) -> Result<Vec<usize>, &'static str>
where
Ty: petgraph::EdgeType,
{
if petgraph::algo::connected_components(&g) != 1 {
return Err("Not a fully-connected graph");
}
let reverse_rate = max(100, g.node_count() / 1000);
let backtrack_rate = max(1000, g.node_count() / 100);
let backtrack_amount = max(5, g.node_count() / 10_000);
let reset_rate = g.node_count() * 10; // Must be larger than num nodes
let max_iterations = reset_rate * 5;
let mut rng = rand::thread_rng();
let mut path = match seed {
Some(s) => Path::from_seed(&s, g.node_count()),
None => setup_path(g)?,
};
let mut longest_path: Vec<usize> = Vec::with_capacity(g.node_count());
let mut iteration = 0;
let mut resets = 0;
loop {
// Reverse the path often
if iteration % reverse_rate == 0 {
path.reverse();
}
// Reset the search occasionally
if iteration > reset_rate {
iteration = 1;
resets += 1;
path = setup_path(g)?;
continue;
}
// Backtrack a smidge now and again
if iteration % backtrack_rate == 0 | self.path.truncate(new_size);
}
| random_line_split |
_bev_qfz.py | self.delta_ground = delta_ground
self.delta_h_circle = delta_h_circle
self.nl = nl
def find_min_z(zL, step):
# TODO: Ask Bea the reason why this function. Apparently minPercent is not used
# histogram of zL, step = 0.2. minZ is set to the value over 0
# with at maximum 5% of points under it.
mybins = np.arange(np.amin(zL), np.amax(zL), step)
myhisto = np.histogram(zL, mybins)
mycount = myhisto[0]
idx = np.where(mycount > 100)
minZ = myhisto[1][idx[0][0]]
return minZ
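# Editor's sketch (illustrative, not part of the original pipeline): find_min_z keeps the
# first histogram bin holding more than 100 points, which acts as a crude floor estimate
# that ignores sparse low outliers. The synthetic z values and the 0.2 step below are
# assumptions made purely for demonstration; this helper is never called by the module.
def _demo_find_min_z():
    rng = np.random.RandomState(0)
    z = np.concatenate([
        rng.normal(0.0, 0.05, 5000),   # dense ground returns around z = 0
        rng.uniform(-2.0, -1.0, 20),   # a handful of spurious low points
    ])
    return find_min_z(z, 0.2)          # expected to land near 0, not near -2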
def draw_dart(im, points, proj, h_scanner, alpha0, nb_layers=64):
"""im: as an input image just the size is important. As an output
image it contains the dart board
x0,y0,hScanner: scanner position
alpha0: first angle
res_x,res_y : spatial resolution of input image
nb_layers
The output is a dart-board partition whose cell sizes follow the scanner layer geometry
"""
# x0, y0: used here with a smil image...
y0, x0 = get_scanner_xy(points, proj)
# 5 pixels / m, 1 px = 20 cm
res_x = proj.projector.res_x
# 5 pixels / m , 1 px = 20 cm
# res_y = proj.projector.res_y
res_alpha = 26.9 / nb_layers
radius_index = {}
for i in range(nb_layers):
angle = alpha0 - (res_alpha * i)
angle_rad = ((90-angle) * np.pi) / 180.0
radius = int(np.round(abs(h_scanner * np.tan(angle_rad) * res_x)))
if radius > (im.getWidth() + im.getHeight()):
radius_index[i] = max(im.getWidth(), im.getHeight())
else:
radius_index[i] = radius
# for each distance to scanner, get the layer index
inverse_radius_index = {}
index = 0
# get the maximum index falling into the image
imsize = max(im.getHeight(), im.getWidth())
while imsize <= radius_index[index]:
index = index + 1
# for this index, get the corresponding radius
# for larger radius assign max_index+1
r = im.getHeight() + im.getWidth()
while r > radius_index[index]:
inverse_radius_index[r] = index + 1
r = r - 1
# each r (radius) has its layer number (inverse_radius_index).
# index0 close to horizontal, the maximum index close to vertical
while r > 0:
while r > radius_index[index]:
inverse_radius_index[r] = index + 1
r = r - 1
index = index + 1
if index == nb_layers:
break
# close to the scanner (masked zone)
while r >= 0:
inverse_radius_index[r] = nb_layers + 1
r = r - 1
im_label = sm.Image(im, "UINT16")
# Start faster version that generates dart
# convert the dict to a numpy array
max_r = max(inverse_radius_index.keys())
arr_inv_radius = np.zeros(max_r + 1)
for k in inverse_radius_index.keys():
arr_inv_radius[k] = inverse_radius_index[k]
# fill the image with radius and angular sector
nr, nc = smil_2_np(im).shape
np_rows = np.repeat(np.arange(nr), nc).reshape(nr, nc)
np_cols = np.repeat(np.arange(nc), nr).reshape((nr, nc), order='F')
deltax = np_cols - x0
deltay = np_rows - y0
np_theta = np.round(180+(180*np.arctan2(deltay, deltax))/(2*np.pi)).astype(int)
# smil and numpy have swapped axes
np_theta[y0, x0] = 0
np_r = np.sqrt(deltax**2 + deltay**2).astype(int)
np_r = arr_inv_radius[np_r]
im_r = np_2_smil(np_r)
im_theta = np_2_smil(np_theta)
# label 2 partitions
sm.labelWithoutFunctor2Partitions(im_r, im_theta, im_label, sm.CrossSE())
return im_label
def get_scanner_xy(points, proj):
""" get x0,y0 coordinates of the scanner location """
# Find the pixel corresponding to (x=0,y=0)
res_x = proj.projector.res_x # 5 pixels / m, 1 px = 20 cm
res_y = proj.projector.res_y # 5 pixels / m , 1 px = 20 cm
min_x, min_y, min_z = points.min(0)
# the first coordinate is associated to the row coordinate of the image
y0 = int(np.floor((0 - min_y) * res_y).astype(np.int))
# the second coordinate is associated to the column coordinate of the image
x0 = int(np.floor((0 - min_x) * res_x).astype(np.int))
return x0, y0
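# Editor's note (worked example, values assumed): with res_x = res_y = 5 px/m and a cloud
# whose minimum corner is (min_x, min_y) = (-40 m, -25 m), the scanner at the world origin
# maps to column x0 = floor((0 - -40) * 5) = 200 and row y0 = floor((0 - -25) * 5) = 125.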
def compute_circle(points, proj, im_max, nl):
# used with a smil image... (y and x swapped on purpose)
y0, x0 = get_scanner_xy(points, proj)
# Get the circle where the scanner is located
im_mark = sm.Image(im_max)
im_tmp, im_circle = sm.Image(im_max), sm.Image(im_max)
im_mark.setPixel(x0, y0, 255)
# empty pixels
sm.compare(im_max, "==", 0, 255, 0, im_tmp)
# get the circle
sm.build(im_mark, im_tmp, im_circle, nl)
# Problem: circle too large (900-px image). Restrict it to a 10m x 10m window
sm.fill(im_tmp, 0)
circle_size = int(5.5 * proj.projector.res_x)
xinit, yinit = x0 - circle_size, y0 - circle_size
sm.copy(im_circle, xinit, yinit, 2*circle_size, 2*circle_size, im_tmp, xinit, yinit)
sm.copy(im_tmp, im_circle)
return im_circle
def dart_interp(points, proj, im, im_interp, nl):
""" input: points 3D. Required to compute the x0,y0 of the scanner
im: the image to be interpolated
imInterp: the output image
nl: neighborhood
Each chess board sector takes the value of the pixel inside, but only if it is alone
"""
# A class holding all this info would be useful, rather than defining these variables several times...
nb_layers = 64
alpha0 = 0
h_scanner = 1.73
# get chess board ## TODO: define immax
im_dart = draw_dart(im, points, proj, h_scanner, alpha0, nb_layers)
mymax = sm.maxVal(im)
sm.compare(im, "==", 0, mymax + 1, im, im)
# propagate the min value (!=0) over the whole cell
label_with_measure(im_dart, im, im_interp, "min", nl)
# BMI
sm.compare(im_interp, "==", mymax + 1, 0, im_interp, im_interp) # empty cells have max-value
sm.compare(im, "==", mymax + 1, 0, im, im) # mymax+1 -> 0 again
im_obj = sm.Image(im)
sm.sub(im, im_interp, im_obj)
sm.compare(im, "==", 0, im_interp, im, im_interp) # only empty pixels are interpolated
# return im_chess, imObj
return im_dart, im_obj
def im_dart_interp(points, proj, im_max, nl):
im_interp = sm.Image(im_max)
im_dart, im_obj = dart_interp(points, proj, im_max, im_interp, nl)
return im_interp, im_dart, im_obj
def ground_detection_min_circle(params, points, proj, res_z, im_min, im_max):
"""
Parameters
----------
params: LambdaGDParameters
points: ndarray
proj: Projection
res_z: float
im_min: sm.Image
im_max: sm.Image
"""
my_lambda, nl = params.my_lambda, params.nl
im_ground = sm.Image(im_min)
im_circle = compute_circle(points, proj, im_max, nl)
im_tmp = sm.Image(im_circle)
|
class LambdaGDParameters:
def __init__(self, my_lambda=2, delta_ground=0.2, delta_h_circle=0.5, nl=sm.HexSE()):
self.my_lambda = my_lambda | random_line_split |
|
_bev_qfz.py |
# for each distance to scanner, get the layer index
inverse_radius_index = {}
index = 0
# get the maximum index falling into the image
imsize = max(im.getHeight(), im.getWidth())
while imsize <= radius_index[index]:
index = index + 1
# for this index, get the corresponding radius
# for larger radius assign max_index+1
r = im.getHeight() + im.getWidth()
while r > radius_index[index]:
inverse_radius_index[r] = index + 1
r = r - 1
# each r (radius) has its layer number (inverse_radius_index).
# index0 close to horizontal, the maximum index close to vertical
while r > 0:
while r > radius_index[index]:
inverse_radius_index[r] = index + 1
r = r - 1
index = index + 1
if index == nb_layers:
break
# close to the scanner (masked zone)
while r >= 0:
inverse_radius_index[r] = nb_layers + 1
r = r - 1
im_label = sm.Image(im, "UINT16")
# Start faster version that generates dart
# convert the dict to a numpy array
max_r = max(inverse_radius_index.keys())
arr_inv_radius = np.zeros(max_r + 1)
for k in inverse_radius_index.keys():
arr_inv_radius[k] = inverse_radius_index[k]
# fill the image with radius and angular sector
nr, nc = smil_2_np(im).shape
np_rows = np.repeat(np.arange(nr), nc).reshape(nr, nc)
np_cols = np.repeat(np.arange(nc), nr).reshape((nr, nc), order='F')
deltax = np_cols - x0
deltay = np_rows - y0
np_theta = np.round(180+(180*np.arctan2(deltay, deltax))/(2*np.pi)).astype(int)
# smil and numpy have swapped axes
np_theta[y0, x0] = 0
np_r = np.sqrt(deltax**2 + deltay**2).astype(int)
np_r = arr_inv_radius[np_r]
im_r = np_2_smil(np_r)
im_theta = np_2_smil(np_theta)
# label 2 partitions
sm.labelWithoutFunctor2Partitions(im_r, im_theta, im_label, sm.CrossSE())
return im_label
def get_scanner_xy(points, proj):
""" get x0,y0 coordinates of the scanner location """
# Find the pixel corresponding to (x=0,y=0)
res_x = proj.projector.res_x # 5 pixels / m, 1 px = 20 cm
res_y = proj.projector.res_y # 5 pixels / m , 1 px = 20 cm
min_x, min_y, min_z = points.min(0)
# the first coordinate is associated to the row coordinate of the image
y0 = int(np.floor((0 - min_y) * res_y).astype(np.int))
# the second coordinate is associated to the column coordinate of the image
x0 = int(np.floor((0 - min_x) * res_x).astype(np.int))
return x0, y0
def compute_circle(points, proj, im_max, nl):
# je l'utilise avec une image smil... (y et x inverse expres)
y0, x0 = get_scanner_xy(points, proj)
# Get the circle where the scanner is located
im_mark = sm.Image(im_max)
im_tmp, im_circle = sm.Image(im_max), sm.Image(im_max)
im_mark.setPixel(x0, y0, 255)
# empty pixels
sm.compare(im_max, "==", 0, 255, 0, im_tmp)
# get the circle
sm.build(im_mark, im_tmp, im_circle, nl)
# Problem: circle too large (900-px image). Restrict it to a 10m x 10m window
sm.fill(im_tmp, 0)
circle_size = int(5.5 * proj.projector.res_x)
xinit, yinit = x0 - circle_size, y0 - circle_size
sm.copy(im_circle, xinit, yinit, 2*circle_size, 2*circle_size, im_tmp, xinit, yinit)
sm.copy(im_tmp, im_circle)
return im_circle
def dart_interp(points, proj, im, im_interp, nl):
""" input: points 3D. Required to compute the x0,y0 of the scanner
im: the image to be interpolated
imInterp: the output image
nl: neighborhood
Each chess board sector takes the value of the pixel inside, but only if it is alone
"""
# A class holding all this info would be useful, rather than defining these variables several times...
nb_layers = 64
alpha0 = 0
h_scanner = 1.73
# get chess board ## TODO: define immax
im_dart = draw_dart(im, points, proj, h_scanner, alpha0, nb_layers)
mymax = sm.maxVal(im)
sm.compare(im, "==", 0, mymax + 1, im, im)
# propagate the min value (!=0) over the whole cell
label_with_measure(im_dart, im, im_interp, "min", nl)
# BMI
sm.compare(im_interp, "==", mymax + 1, 0, im_interp, im_interp) # empty cells have max-value
sm.compare(im, "==", mymax + 1, 0, im, im) # mymax+1 -> 0 again
im_obj = sm.Image(im)
sm.sub(im, im_interp, im_obj)
sm.compare(im, "==", 0, im_interp, im, im_interp) # only empty pixels are interpolated
# return im_chess, imObj
return im_dart, im_obj
def im_dart_interp(points, proj, im_max, nl):
im_interp = sm.Image(im_max)
im_dart, im_obj = dart_interp(points, proj, im_max, im_interp, nl)
return im_interp, im_dart, im_obj
def ground_detection_min_circle(params, points, proj, res_z, im_min, im_max):
"""
Parameters
----------
params: LambdaGDParameters
points: ndarray
proj: Projection
res_z: float
im_min: sm.Image
im_max: sm.Image
"""
my_lambda, nl = params.my_lambda, params.nl
im_ground = sm.Image(im_min)
im_circle = compute_circle(points, proj, im_max, nl)
im_tmp = sm.Image(im_circle)
# NEW:
sm.dilate(im_circle, im_tmp, nl(1 * proj.res_x))
# OLD: sm.dilate(im_circle, im_tmp, nl(4))
sm.compare(im_tmp, ">", im_circle, im_max, 0, im_tmp)
histo = sm.histogram(im_tmp)
del (histo[0])
hist_keys = histo.keys()
hist_val = histo.values()
my_min = 0
for k in range(len(hist_keys)):
if hist_val[k] > 0:
my_min = hist_keys[k]
break
# NEW:
delta = int(params.delta_h_circle * res_z)
sm.threshold(im_tmp, my_min, min(255, my_min + delta), im_ground)
# OLD:
# sm.threshold(im_tmp, my_min, min(255, my_min + 5), im_ground)
sm.sub(im_max, im_min, im_tmp)
# put to zero all the non zero pixels in imGround
sm.compare(im_tmp, ">", int(np.round(0.3 * res_z)), 0, im_ground, im_ground)
# NEW:
if proj.res_x > 1:
# open with se_2x2
se_2x2 = sm.StrElt(True, [0, 1, 2, 3])
sm.open(im_ground, im_ground, se_2x2)
# OLD:
# se_2x2 = sm.StrElt(True, [0, 1, 2, 3])
# sm.open(im_ground, im_ground, se_2x2)
im_interp, im_dart, im_obj = im_dart_interp(points, proj, im_max, nl)
# Lambda flat zones
im_label = sm.Image(im_interp, "UINT32")
sm.lambdaLabel(im_interp, my_lambda, im_label, nl)
label_with_measure(im_label, im_ground, im_ground, "max", nl)
# todo: parametrize the 3 value
sm.compare(im_obj, ">", 3, 0, im_ground, im_ground)
# empty pixels set to 0 again
sm.compare(im_max, "==", 0, 0, im_ground, im_ground)
# evaluate
# conf_mat, conf_mat_norm = evaluate(im | radius_index[i] = radius | conditional_block |
|
_bev_qfz.py | for i in range(nb_layers):
angle = alpha0 - (res_alpha * i)
angle_rad = ((90-angle) * np.pi) / 180.0
radius = int(np.round(abs(h_scanner * np.tan(angle_rad) * res_x)))
if radius > (im.getWidth() + im.getHeight()):
radius_index[i] = max(im.getWidth(), im.getHeight())
else:
radius_index[i] = radius
# for each distance to scanner, get the layer index
inverse_radius_index = {}
index = 0
# get the maximum index falling into the image
imsize = max(im.getHeight(), im.getWidth())
while imsize <= radius_index[index]:
index = index + 1
# for this index, get the corresponding radius
# for larger radius assign max_index+1
r = im.getHeight() + im.getWidth()
while r > radius_index[index]:
inverse_radius_index[r] = index + 1
r = r - 1
# each r (radius) has its layer number (inverse_radius_index).
# index0 close to horizontal, the maximum index close to vertical
while r > 0:
while r > radius_index[index]:
inverse_radius_index[r] = index + 1
r = r - 1
index = index + 1
if index == nb_layers:
break
# close to the scanner (masked zone)
while r >= 0:
inverse_radius_index[r] = nb_layers + 1
r = r - 1
im_label = sm.Image(im, "UINT16")
# Start faster version that generates dart
# convert the dict to a numpy array
max_r = max(inverse_radius_index.keys())
arr_inv_radius = np.zeros(max_r + 1)
for k in inverse_radius_index.keys():
arr_inv_radius[k] = inverse_radius_index[k]
# fill the image with radius and angular sector
nr, nc = smil_2_np(im).shape
np_rows = np.repeat(np.arange(nr), nc).reshape(nr, nc)
np_cols = np.repeat(np.arange(nc), nr).reshape((nr, nc), order='F')
deltax = np_cols - x0
deltay = np_rows - y0
np_theta = np.round(180+(180*np.arctan2(deltay, deltax))/(2*np.pi)).astype(int)
# smil and numpy have swapped axes
np_theta[y0, x0] = 0
np_r = np.sqrt(deltax**2 + deltay**2).astype(int)
np_r = arr_inv_radius[np_r]
im_r = np_2_smil(np_r)
im_theta = np_2_smil(np_theta)
# label 2 partitions
sm.labelWithoutFunctor2Partitions(im_r, im_theta, im_label, sm.CrossSE())
return im_label
def get_scanner_xy(points, proj):
""" get x0,y0 coordinates of the scanner location """
# Find the pixel corresponding to (x=0,y=0)
res_x = proj.projector.res_x # 5 pixels / m, 1 px = 20 cm
res_y = proj.projector.res_y # 5 pixels / m , 1 px = 20 cm
min_x, min_y, min_z = points.min(0)
# the first coordinate is associated to the row coordinate of the image
y0 = int(np.floor((0 - min_y) * res_y).astype(np.int))
# the second coordinate is associated to the column coordinate of the image
x0 = int(np.floor((0 - min_x) * res_x).astype(np.int))
return x0, y0
def compute_circle(points, proj, im_max, nl):
# used with a smil image... (y and x swapped on purpose)
y0, x0 = get_scanner_xy(points, proj)
# Get the circle where the scanner is located
im_mark = sm.Image(im_max)
im_tmp, im_circle = sm.Image(im_max), sm.Image(im_max)
im_mark.setPixel(x0, y0, 255)
# empty pixels
sm.compare(im_max, "==", 0, 255, 0, im_tmp)
# get the circle
sm.build(im_mark, im_tmp, im_circle, nl)
# Problem: circle too large (900-px image). Restrict it to a 10m x 10m window
sm.fill(im_tmp, 0)
circle_size = int(5.5 * proj.projector.res_x)
xinit, yinit = x0 - circle_size, y0 - circle_size
sm.copy(im_circle, xinit, yinit, 2*circle_size, 2*circle_size, im_tmp, xinit, yinit)
sm.copy(im_tmp, im_circle)
return im_circle
def dart_interp(points, proj, im, im_interp, nl):
""" input: points 3D. Required to compute the x0,y0 of the scanner
im: the image to be interpolated
imInterp: the output image
nl: neighborhood
Each chess board sector takes the value of the pixel inside, but only if it is alone
"""
# A class holding all this info would be useful, rather than defining these variables several times...
nb_layers = 64
alpha0 = 0
h_scanner = 1.73
# get chess board ## TODO: define immax
im_dart = draw_dart(im, points, proj, h_scanner, alpha0, nb_layers)
mymax = sm.maxVal(im)
sm.compare(im, "==", 0, mymax + 1, im, im)
# propagate the min value (!=0) over the whole cell
label_with_measure(im_dart, im, im_interp, "min", nl)
# BMI
sm.compare(im_interp, "==", mymax + 1, 0, im_interp, im_interp) # empty cells have max-value
sm.compare(im, "==", mymax + 1, 0, im, im) # mymax+1 -> 0 again
im_obj = sm.Image(im)
sm.sub(im, im_interp, im_obj)
sm.compare(im, "==", 0, im_interp, im, im_interp) # only empty pixels are interpolated
# return im_chess, imObj
return im_dart, im_obj
def im_dart_interp(points, proj, im_max, nl):
im_interp = sm.Image(im_max)
im_dart, im_obj = dart_interp(points, proj, im_max, im_interp, nl)
return im_interp, im_dart, im_obj
def ground_detection_min_circle(params, points, proj, res_z, im_min, im_max):
"""
Parameters
----------
params: LambdaGDParameters
points: ndarray
proj: Projection
res_z: float
im_min: sm.Image
im_max: sm.Image
"""
my_lambda, nl = params.my_lambda, params.nl
im_ground = sm.Image(im_min)
im_circle = compute_circle(points, proj, im_max, nl)
im_tmp = sm.Image(im_circle)
# NEW:
sm.dilate(im_circle, im_tmp, nl(1 * proj.res_x))
# OLD: sm.dilate(im_circle, im_tmp, nl(4))
sm.compare(im_tmp, ">", im_circle, im_max, 0, im_tmp)
histo = sm.histogram(im_tmp)
del (histo[0])
hist_keys = histo.keys()
hist_val = histo.values()
my_min = 0
for k in range(len(hist_keys)):
if hist_val[k] > 0:
my_min = hist_keys[k]
break
# NEW:
delta = int(params.delta_h_circle * res_z)
sm.threshold(im_tmp, my_min, min(255, my_min + delta), im_ground)
# OLD:
# sm.threshold(im_tmp, my_min, min(255, my_min + 5), im_ground)
sm.sub(im_max, im_min, im_tmp)
# put to zero all the non zero pixels in imGround
sm.compare(im_tmp, ">", int(np.round(0.3 * res_z | """im: as an input image just the size is important. As an output
image it contains the dart board
x0,y0,hScanner: scanner position
alpha0: first angle
res_x,res_y : spatial resolution of input image
nb_layers
The output is a dart-board partition whose cell sizes follow the scanner layer geometry
"""
# x0, y0 Je l'utilise avec une image smil...
y0, x0 = get_scanner_xy(points, proj)
# 5 pixels / m, 1 px = 20 cm
res_x = proj.projector.res_x
# 5 pixels / m , 1 px = 20 cm
# res_y = proj.projector.res_y
res_alpha = 26.9 / nb_layers
radius_index = {} | identifier_body |
|
_bev_qfz.py | (zL, step):
# TODO: Ask Bea the reason why this function. Apparently minPercent is not used
# histogram of zL, step = 0.2. minZ is set to the value over 0
# with at maximum 5% of points under it.
mybins = np.arange(np.amin(zL), np.amax(zL), step)
myhisto = np.histogram(zL, mybins)
mycount = myhisto[0]
idx = np.where(mycount > 100)
minZ = myhisto[1][idx[0][0]]
return minZ
def draw_dart(im, points, proj, h_scanner, alpha0, nb_layers=64):
"""im: as an input image just the size is important. As an output
image it contains the dart board
x0,y0,hScanner: scanner position
alpha0: first angle
res_x,res_y : spatial resolution of input image
nb_layers
The output draws a chess board according to the size of the each
"""
# x0, y0: used here with a smil image...
y0, x0 = get_scanner_xy(points, proj)
# 5 pixels / m, 1 px = 20 cm
res_x = proj.projector.res_x
# 5 pixels / m , 1 px = 20 cm
# res_y = proj.projector.res_y
res_alpha = 26.9 / nb_layers
radius_index = {}
for i in range(nb_layers):
angle = alpha0 - (res_alpha * i)
angle_rad = ((90-angle) * np.pi) / 180.0
radius = int(np.round(abs(h_scanner * np.tan(angle_rad) * res_x)))
if radius > (im.getWidth() + im.getHeight()):
radius_index[i] = max(im.getWidth(), im.getHeight())
else:
radius_index[i] = radius
# for each distance to scanner, get the layer index
inverse_radius_index = {}
index = 0
# get the maximum index falling into the image
imsize = max(im.getHeight(), im.getWidth())
while imsize <= radius_index[index]:
index = index + 1
# for this index, get the corresponding radius
# for larger radius assign max_index+1
r = im.getHeight() + im.getWidth()
while r > radius_index[index]:
inverse_radius_index[r] = index + 1
r = r - 1
# each r (radius) has its layer number (inverse_radius_index).
# index0 close to horizontal, the maximum index close to vertical
while r > 0:
while r > radius_index[index]:
inverse_radius_index[r] = index + 1
r = r - 1
index = index + 1
if index == nb_layers:
break
# close to the scanner (masked zone)
while r >= 0:
inverse_radius_index[r] = nb_layers + 1
r = r - 1
im_label = sm.Image(im, "UINT16")
# Start faster version that generates dart
# convert the dict to a numpy array
max_r = max(inverse_radius_index.keys())
arr_inv_radius = np.zeros(max_r + 1)
for k in inverse_radius_index.keys():
arr_inv_radius[k] = inverse_radius_index[k]
# fill the image with radius and angular sector
nr, nc = smil_2_np(im).shape
np_rows = np.repeat(np.arange(nr), nc).reshape(nr, nc)
np_cols = np.repeat(np.arange(nc), nr).reshape((nr, nc), order='F')
deltax = np_cols - x0
deltay = np_rows - y0
np_theta = np.round(180+(180*np.arctan2(deltay, deltax))/(2*np.pi)).astype(int)
# smil and numpy have swapped axes
np_theta[y0, x0] = 0
np_r = np.sqrt(deltax**2 + deltay**2).astype(int)
np_r = arr_inv_radius[np_r]
im_r = np_2_smil(np_r)
im_theta = np_2_smil(np_theta)
# label 2 partitions
sm.labelWithoutFunctor2Partitions(im_r, im_theta, im_label, sm.CrossSE())
return im_label
def get_scanner_xy(points, proj):
""" get x0,y0 coordinates of the scanner location """
# Find the pixel corresponding to (x=0,y=0)
res_x = proj.projector.res_x # 5 pixels / m, 1 px = 20 cm
res_y = proj.projector.res_y # 5 pixels / m , 1 px = 20 cm
min_x, min_y, min_z = points.min(0)
# the first coordinate is associated to the row coordinate of the image
y0 = int(np.floor((0 - min_y) * res_y).astype(np.int))
# the second coordinate is associated to the column coordinate of the image
x0 = int(np.floor((0 - min_x) * res_x).astype(np.int))
return x0, y0
def compute_circle(points, proj, im_max, nl):
# used with a smil image... (y and x swapped on purpose)
y0, x0 = get_scanner_xy(points, proj)
# Get the circle where the scanner is located
im_mark = sm.Image(im_max)
im_tmp, im_circle = sm.Image(im_max), sm.Image(im_max)
im_mark.setPixel(x0, y0, 255)
# empty pixels
sm.compare(im_max, "==", 0, 255, 0, im_tmp)
# get the circle
sm.build(im_mark, im_tmp, im_circle, nl)
# Problem: circle too large (900-px image). Restrict it to a 10m x 10m window
sm.fill(im_tmp, 0)
circle_size = int(5.5 * proj.projector.res_x)
xinit, yinit = x0 - circle_size, y0 - circle_size
sm.copy(im_circle, xinit, yinit, 2*circle_size, 2*circle_size, im_tmp, xinit, yinit)
sm.copy(im_tmp, im_circle)
return im_circle
def dart_interp(points, proj, im, im_interp, nl):
""" input: points 3D. Required to compute the x0,y0 of the scanner
im: the image to be interpolated
imInterp: the output image
nl: neighborhood
Each chess board sector takes the value of the pixel inside, but only if it is alone
"""
# A class holding all this info would be useful, rather than defining these variables several times...
nb_layers = 64
alpha0 = 0
h_scanner = 1.73
# get chess board ## TODO: define immax
im_dart = draw_dart(im, points, proj, h_scanner, alpha0, nb_layers)
mymax = sm.maxVal(im)
sm.compare(im, "==", 0, mymax + 1, im, im)
# propagate the min value (!=0) over the whole cell
label_with_measure(im_dart, im, im_interp, "min", nl)
# BMI
sm.compare(im_interp, "==", mymax + 1, 0, im_interp, im_interp) # empty cells have max-value
sm.compare(im, "==", mymax + 1, 0, im, im) # mymax+1 -> 0 again
im_obj = sm.Image(im)
sm.sub(im, im_interp, im_obj)
sm.compare(im, "==", 0, im_interp, im, im_interp) # only empty pixels are interpolated
# return im_chess, imObj
return im_dart, im_obj
def im_dart_interp(points, proj, im_max, nl):
im_interp = sm.Image(im_max)
im_dart, im_obj = dart_interp(points, proj, im_max, im_interp, nl)
return im_interp, im_dart, im_obj
def ground_detection_min_circle(params, points, proj, res_z, im_min, im_max):
"""
Parameters
----------
params: LambdaGDParameters
points: ndarray
proj: Projection
res_z: float
im_min: sm.Image
im_max: sm.Image
"""
my_lambda, nl = params.my_lambda, params.nl
im_ground = sm.Image(im_min)
im_circle = compute_circle(points, proj, im_max, nl)
im_tmp = sm.Image(im_circle)
# NEW:
sm.dilate(im_circle, im_tmp, nl(1 * proj.res_x))
# OLD: sm.dilate(im_circle, im_tmp, nl(4))
sm.compare(im_tmp, ">", im_circle, im_max, 0, im_tmp)
histo = sm.histogram(im_tmp)
del (histo | find_min_z | identifier_name |
|
particle.py | .angle = 0
self.torque = 0
def set_vel(self, vel):
self.state[2] = vel
return self
def update(self, dt):
self.t += dt
def draw(self, surface):
self.angle += self.state[2]
for i in range(0,316, 45):
x = self.center[0] + math.cos(math.radians(self.angle + i)) * self.radius
y = self.center[1] + math.sin(math.radians(self.angle + i)) * self.radius
if (len(self.lines) <= 7):
self.lines.append(pygame.draw.line(surface, BLACK, self.center, (x,y), 5))
else:
self.lines[i/45] = pygame.draw.line(surface, BLACK, self.center, (x,y), 5)
self.circle = pygame.draw.circle(surface, BLACK, self.center, (int)(self.radius*.7), 10)
def pprint(self):
print 'Wheel', self.state
class World:
def __init__(self, height, width):
self.particles = []
self.wheels =[]
self.height = height
self.width = width
self.e = .2 # Coefficient of restitution
def add(self, imgfile, radius, mass=1.0):
particle = Particle(imgfile, radius, mass)
self.particles.append(particle)
return particle
def addWheel(self, centre, radius):
wheel = Wheel(centre, radius)
self.wheels.append(wheel)
return wheel
def pprint(self):
print '#particles', len(self.particles)
for d in self.particles:
d.pprint()
def draw(self, screen):
for d in self.particles:
d.draw(screen)
for w in self.wheels:
w.draw(screen)
def update(self, dt):
t = []
for d in self.particles:
d.update(dt)
for i in range(0, len(self.particles)):
self.check_for_collision(i)
try:
for j in range(len(self.wheels)):
t.append(threading.Thread(target=self.check_wheel_collision, args=(i, j)))
t[-1].start()
except:
print "Collision detection threading error"
for x in t:
x.join()
self.check_outside_screen()
def check_outside_screen(self):
self.particles = [x for x in self.particles if self.outside_screen(x)]
def outside_screen(self, particle):
if (particle.state[0] < -particle.radius):
return False
elif (particle.state[0] > win_width + particle.radius):
return False
elif (particle.state[1] < -particle.radius):
return False
else:
return True
# check for inter-particle collision
def check_for_collision(self, i):
if (self.particles[i].state[0] - self.particles[i].radius <= 0 or
self.particles[i].state[0] + self.particles[i].radius >= self.width):
self.particles[i].state[2] *= -1*self.e
elif (self.particles[i].state[1] - self.particles[i].radius <= 0):
self.particles[i].state[3] = 0
for j in range(i+1, len(self.particles)):
if i == j:
continue
pos_i = np.array(self.particles[i].state[0:2])
pos_j = np.array(self.particles[j].state[0:2])
dist_ij = np.sqrt(np.sum((pos_i - pos_j)**2))
radius_i = self.particles[i].radius
radius_j = self.particles[j].radius
if dist_ij > radius_i + radius_j:
continue
# May be a collision
vel_i = np.array(self.particles[i].state[2:])
vel_j = np.array(self.particles[j].state[2:])
relative_vel_ij = vel_i - vel_j
n_ij = normalize(pos_i - pos_j)
if np.dot(relative_vel_ij, n_ij) >= 0:
continue
mass_i = self.particles[i].mass
mass_j = self.particles[j].mass
J = -(1+self.e) * np.dot(relative_vel_ij, n_ij) / ((1./mass_i) + (1./mass_j))
vel_i_aftercollision = vel_i + n_ij * J / mass_i
vel_j_aftercollision = vel_j - n_ij * J / mass_j
self.particles[i].set_vel(vel_i_aftercollision)
self.particles[j].set_vel(vel_j_aftercollision)
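# Editor's note (added commentary, not original code): the update above is the standard
# impulse-based response for two spheres. With restitution e, contact normal n_ij and
# relative velocity v_rel = v_i - v_j, the scalar impulse is
# J = -(1 + e) * dot(v_rel, n_ij) / (1/m_i + 1/m_j)
# and each body's velocity changes by +/- J * n_ij / m. For equal unit masses, e = 0.2
# and a head-on approach speed of 1 along n_ij, J = -(1.2 * -1) / 2 = 0.6, so each
# particle's normal velocity changes by 0.6 (illustrative numbers only).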
# check for particle - wheel collision
def check_wheel_collision(self, i, j):
pos_i = np.array(self.particles[i].state[0:2])
pos_j = np.array(self.wheels[j].center)
dist_ij = np.sqrt(np.sum((pos_i - pos_j)**2))
radius_i = self.particles[i].radius
radius_j = self.wheels[j].radius*.7
if dist_ij > radius_i + radius_j:
return
# ensures particles do not cross wheel boundaries
dist_in = -(dist_ij - radius_j - radius_i) # distance inside of wheel
theta = math.asin((pos_i[1] - pos_j[1]) /dist_ij) #angle from centre of wheel
newPos = [(math.cos(theta) * dist_in), (math.sin(theta) * dist_in)]
# makes sure to flip new x pos to the left
if pos_i[0] < pos_j[0]:
newPos[0] *= -1
# updates the particle position
self.particles[i].set_pos([pos_i[0] + newPos[0], pos_i[1] + newPos[1]])
# May be a collision
vel_i = np.array(self.particles[i].state[2:])
vel_j = 0
relative_vel_ij = vel_i - vel_j
n_ij = normalize(pos_i - pos_j)
if np.dot(relative_vel_ij, n_ij) >= 0:
return
mass_i = self.particles[i].mass
mass_j = self.wheels[j].mass
J = -(1+self.e) * np.dot(relative_vel_ij, n_ij) / ((1./mass_i) + (1./mass_j))
vel_i_aftercollision = vel_i + n_ij * J / mass_i
self.particles[i].set_vel(vel_i_aftercollision)
# ANGULAR COLISION #
# detect collision with lines on wheel
for x in range(len(self.wheels[j].lines)):
line = self.wheels[j].lines[x]
A = self.wheels[j].center
C = self.particles[i].state[0:2]
if A == line.topleft:
B = line.bottomright
elif A == line.bottomright:
B = line.topleft
elif A == line.topright:
B = line.bottomleft
else:
B = line.topright
dist = np.sqrt((B[0]-A[0])**2+(B[1]-A[1])**2)
Dx = (B[0]-A[0])/dist
Dy = (B[1]-A[1])/dist
t = Dx*(C[0]-A[0])+Dy*(C[1]-A[1])
Ex = t*Dx+A[0]
Ey = t*Dy+A[1]
dist2 = np.sqrt((Ex-C[0])**2+(Ey-C[1])**2)
#if (dist2 < self.particles[i].radius):
#Do conservation of momentum for angular momentum
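# Editor's sketch (hypothetical continuation, not original code): one way to finish the
# angular coupling hinted at above is to turn the particle's momentum at the contact point
# into a torque about the wheel hub and accumulate it as spin, for example roughly:
# if (dist2 < self.particles[i].radius):
# r_vec = np.array(C) - np.array(A) # lever arm from hub to particle
# tangential = np.cross(r_vec, self.particles[i].state[2:]) # 2D cross product -> scalar
# self.wheels[j].torque += tangential * mass_i / self.wheels[j].mass
# self.wheels[j].state[2] += self.wheels[j].torque * 0.01 # 0.01 scale factor assumed
# The 0.01 factor and the direct use of torque as a spin increment are assumptions only.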
def main():
# initializing pygame
pygame.init()
clock = pygame.time.Clock()
# top left corner is (0,0)
screen = pygame.display.set_mode((win_width, win_height))
pygame.display.set_caption('Water Wheel of Fortune')
world = World(win_height, win_width)
world.addWheel([400, 300], 200)
# spout position and width for when rain == false
spoutPos = 380
spoutWidth = 40
pause = False
rain = False # particles randomly appear at top along widith when true, spout when false
maxP = 100 # maximum number of particles
dt = 0.3
pRadius = 10 # smallest radius is 3, anything smaller is invisible
pMass = 1
# timer to create more particles
pygame.time.set_timer(pygame.USEREVENT + 1, 50)
if rain:
range = [0 + pRadius, win_width - pRadius]
else:
range = [spoutPos + pRadius, spoutPos + spoutWidth + pRadius]
print "\n\nPress P key to pause or resume"
print "Press R key to toggle rain or spout"
print "Press A or D keys to move spout left or right\n\n"
| while True:
# 30 fps
if not pause:
clock.tick(30)
| random_line_split |
|
particle.py | ,y), 5))
else:
self.lines[i/45] = pygame.draw.line(surface, BLACK, self.center, (x,y), 5)
self.circle = pygame.draw.circle(surface, BLACK, self.center, (int)(self.radius*.7), 10)
def pprint(self):
print 'Wheel', self.state
class World:
def __init__(self, height, width):
self.particles = []
self.wheels =[]
self.height = height
self.width = width
self.e = .2 # Coefficient of restitution
def add(self, imgfile, radius, mass=1.0):
particle = Particle(imgfile, radius, mass)
self.particles.append(particle)
return particle
def addWheel(self, centre, radius):
wheel = Wheel(centre, radius)
self.wheels.append(wheel)
return wheel
def pprint(self):
print '#particles', len(self.particles)
for d in self.particles:
d.pprint()
def draw(self, screen):
for d in self.particles:
d.draw(screen)
for w in self.wheels:
w.draw(screen)
def update(self, dt):
t = []
for d in self.particles:
d.update(dt)
for i in range(0, len(self.particles)):
self.check_for_collision(i)
try:
for j in range(len(self.wheels)):
t.append(threading.Thread(target=self.check_wheel_collision, args=(i, j)))
t[-1].start()
except:
print "Collision detection threading error"
for x in t:
x.join()
self.check_outside_screen()
def check_outside_screen(self):
self.particles = [x for x in self.particles if self.outside_screen(x)]
def outside_screen(self, particle):
if (particle.state[0] < -particle.radius):
return False
elif (particle.state[0] > win_width + particle.radius):
return False
elif (particle.state[1] < -particle.radius):
return False
else:
return True
# check for inter-particle collision
def check_for_collision(self, i):
if (self.particles[i].state[0] - self.particles[i].radius <= 0 or
self.particles[i].state[0] + self.particles[i].radius >= 800):
self.particles[i].state[2] *= -1*self.e
elif (self.particles[i].state[1] - self.particles[i].radius <= 0):
self.particles[i].state[3] = 0
for j in range(i+1, len(self.particles)):
if i == j:
continue
pos_i = np.array(self.particles[i].state[0:2])
pos_j = np.array(self.particles[j].state[0:2])
dist_ij = np.sqrt(np.sum((pos_i - pos_j)**2))
radius_i = self.particles[i].radius
radius_j = self.particles[j].radius
if dist_ij > radius_i + radius_j:
continue
# May be a collision
vel_i = np.array(self.particles[i].state[2:])
vel_j = np.array(self.particles[j].state[2:])
relative_vel_ij = vel_i - vel_j
n_ij = normalize(pos_i - pos_j)
if np.dot(relative_vel_ij, n_ij) >= 0:
continue
mass_i = self.particles[i].mass
mass_j = self.particles[j].mass
J = -(1+self.e) * np.dot(relative_vel_ij, n_ij) / ((1./mass_i) + (1./mass_j))
vel_i_aftercollision = vel_i + n_ij * J / mass_i
vel_j_aftercollision = vel_j - n_ij * J / mass_j
self.particles[i].set_vel(vel_i_aftercollision)
self.particles[j].set_vel(vel_j_aftercollision)
# check for particle - wheel collision
def check_wheel_collision(self, i, j):
pos_i = np.array(self.particles[i].state[0:2])
pos_j = np.array(self.wheels[j].center)
dist_ij = np.sqrt(np.sum((pos_i - pos_j)**2))
radius_i = self.particles[i].radius
radius_j = self.wheels[j].radius*.7
if dist_ij > radius_i + radius_j:
return
# ensures particles do not cross wheel boundaries
dist_in = -(dist_ij - radius_j - radius_i) # distance inside of wheel
theta = math.asin((pos_i[1] - pos_j[1]) /dist_ij) #angle from centre of wheel
newPos = [(math.cos(theta) * dist_in), (math.sin(theta) * dist_in)]
# makes sure to flip new x pos to the left
if pos_i[0] < pos_j[0]:
newPos[0] *= -1
# updates the particle position
self.particles[i].set_pos([pos_i[0] + newPos[0], pos_i[1] + newPos[1]])
# May be a collision
vel_i = np.array(self.particles[i].state[2:])
vel_j = 0
relative_vel_ij = vel_i - vel_j
n_ij = normalize(pos_i - pos_j)
if np.dot(relative_vel_ij, n_ij) >= 0:
return
mass_i = self.particles[i].mass
mass_j = self.wheels[j].mass
J = -(1+self.e) * np.dot(relative_vel_ij, n_ij) / ((1./mass_i) + (1./mass_j))
vel_i_aftercollision = vel_i + n_ij * J / mass_i
self.particles[i].set_vel(vel_i_aftercollision)
# ANGULAR COLLISION #
# detect collision with lines on wheel
for x in range(len(self.wheels[j].lines)):
line = self.wheels[j].lines[x]
A = self.wheels[j].center
C = self.particles[i].state[0:2]
if A == line.topleft:
B = line.bottomright
elif A == line.bottomright:
B = line.topleft
elif A == line.topright:
B = line.bottomleft
else:
B = line.topright
dist = np.sqrt((B[0]-A[0])**2+(B[1]-A[1])**2)
Dx = (B[0]-A[0])/dist
Dy = (B[1]-A[1])/dist
t = Dx*(C[0]-A[0])+Dy*(C[1]-A[1])
Ex = t*Dx+A[0]
Ey = t*Dy+A[1]
dist2 = np.sqrt((Ex-C[0])**2+(Ey-C[1])**2)
#if (dist2 < self.particles[i].radius):
#Do conservation of momentum for angular momentum
def main():
# initializing pygame
pygame.init()
clock = pygame.time.Clock()
# top left corner is (0,0)
screen = pygame.display.set_mode((win_width, win_height))
pygame.display.set_caption('Water Wheel of Fortune')
world = World(win_height, win_width)
world.addWheel([400, 300], 200)
# spout position and width for when rain == false
spoutPos = 380
spoutWidth = 40
pause = False
rain = False # particles randomly appear at top along width when true, spout when false
maxP = 100 # maximum number of particles
dt = 0.3
pRadius = 10 # smallest radius is 3, anything smaller is invisible
pMass = 1
# timer to create more particles
pygame.time.set_timer(pygame.USEREVENT + 1, 50)
if rain:
range = [0 + pRadius, win_width - pRadius]
else:
range = [spoutPos + pRadius, spoutPos + spoutWidth + pRadius]
print "\n\nPress P key to pause or resume"
print "Press R key to toggle rain or spout"
print "Press A or D keys to move spout left or right\n\n"
while True:
# 30 fps
| if not pause:
clock.tick(30)
event = pygame.event.poll()
if event.type == pygame.QUIT:
sys.exit(0)
elif event.type == pygame.KEYDOWN and event.key == pygame.K_q:
pygame.quit()
sys.exit(0)
elif event.type == pygame.KEYDOWN and event.key == pygame.K_p:
pause = not pause
elif event.type == pygame.KEYDOWN and event.key == pygame.K_r:
rain = not rain
if rain:
range = [0 + pRadius, win_width - pRadius]
else:
range = [spoutPos + pRadius, spoutPos + spoutWidth + pRadius]
elif event.type == pygame.USEREVENT + 1 and not pause: | conditional_block |
|
particle.py | = vel
return self
def update(self, dt):
self.t += dt
self.state[3] += dt * self.gravity
self.state[0] += self.state[2] * dt
self.state[1] += self.state[3] * dt
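# Note (added): the three statements above amount to one semi-implicit
# (symplectic) Euler step: gravity updates the vertical velocity first, and the
# vertical position then uses that updated velocity. For example, with
# gravity = -10, dt = 0.3 and state = [0, 100, 2, 0] (x, y, vx, vy), one call gives
#   vy -> 0 + 0.3*(-10)  = -3.0
#   x  -> 0 + 2*0.3      =  0.6
#   y  -> 100 + (-3)*0.3 = 99.1
# (the gravity and dt values here are illustrative, not taken from this file).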
def move_by(self, delta):
self.state[0:2] = np.add(self.pos, delta)
return self
def draw(self, surface):
rect = self.image.get_rect()
rect.center = (self.state[0], win_height-self.state[1]) # Flipping y
surface.blit(self.image, rect)
def pprint(self):
print 'Particle', self.state
class Wheel(pygame.sprite.Sprite):
def __init__(self, center, radius, mass=1000):
pygame.sprite.Sprite.__init__(self)
self.state = np.zeros(4)
self.state[0:2] = np.zeros(2) # position
self.state[2] = 1 # angular velocity
self.state[3] = 0 # angular momentum
self.lines = []
self.mass = mass
self.t = 0
self.center = center
self.radius = radius
self.angle = 0
self.torque = 0
def set_vel(self, vel):
self.state[2] = vel
return self
def update(self, dt):
self.t += dt
def draw(self, surface):
self.angle += self.state[2]
for i in range(0,316, 45):
x = self.center[0] + math.cos(math.radians(self.angle + i)) * self.radius
y = self.center[1] + math.sin(math.radians(self.angle + i)) * self.radius
if (len(self.lines) <= 7):
self.lines.append(pygame.draw.line(surface, BLACK, self.center, (x,y), 5))
else:
self.lines[i/45] = pygame.draw.line(surface, BLACK, self.center, (x,y), 5)
self.circle = pygame.draw.circle(surface, BLACK, self.center, (int)(self.radius*.7), 10)
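# Note (added): the loop above draws eight spokes, one every 45 degrees from the
# current wheel angle, and reuses lines[i/45] (indices 0..7) once the rect list
# has been filled. The i/45 index relies on Python 2 integer division; under
# Python 3 it would need to be written i//45 to stay an integer index.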
def pprint(self):
print 'Wheel', self.state
class World:
def __init__(self, height, width):
self.particles = []
self.wheels =[]
self.height = height
self.width = width
self.e = .2 # Coefficient of restitution
def add(self, imgfile, radius, mass=1.0):
particle = Particle(imgfile, radius, mass)
self.particles.append(particle)
return particle
def addWheel(self, centre, radius):
wheel = Wheel(centre, radius)
self.wheels.append(wheel)
return wheel
def pprint(self):
print '#particles', len(self.particles)
for d in self.particles:
d.pprint()
def draw(self, screen):
for d in self.particles:
d.draw(screen)
for w in self.wheels:
w.draw(screen)
def update(self, dt):
t = []
for d in self.particles:
d.update(dt)
for i in range(0, len(self.particles)):
self.check_for_collision(i)
try:
for j in range(len(self.wheels)):
t.append(threading.Thread(target=self.check_wheel_collision, args=(i, j)))
t[-1].start()
except:
print "Collision detection threading error"
for x in t:
x.join()
self.check_outside_screen()
def check_outside_screen(self):
|
def outside_screen(self, particle):
if (particle.state[0] < -particle.radius):
return False
elif (particle.state[0] > win_width + particle.radius):
return False
elif (particle.state[1] < -particle.radius):
return False
else:
return True
# check for inter-particle collision
def check_for_collision(self, i):
if (self.particles[i].state[0] - self.particles[i].radius <= 0 or
self.particles[i].state[0] + self.particles[i].radius >= 800):
self.particles[i].state[2] *= -1*self.e
elif (self.particles[i].state[1] - self.particles[i].radius <= 0):
self.particles[i].state[3] = 0
for j in range(i+1, len(self.particles)):
if i == j:
continue
pos_i = np.array(self.particles[i].state[0:2])
pos_j = np.array(self.particles[j].state[0:2])
dist_ij = np.sqrt(np.sum((pos_i - pos_j)**2))
radius_i = self.particles[i].radius
radius_j = self.particles[j].radius
if dist_ij > radius_i + radius_j:
continue
# May be a collision
vel_i = np.array(self.particles[i].state[2:])
vel_j = np.array(self.particles[j].state[2:])
relative_vel_ij = vel_i - vel_j
n_ij = normalize(pos_i - pos_j)
if np.dot(relative_vel_ij, n_ij) >= 0:
continue
mass_i = self.particles[i].mass
mass_j = self.particles[j].mass
J = -(1+self.e) * np.dot(relative_vel_ij, n_ij) / ((1./mass_i) + (1./mass_j))
vel_i_aftercollision = vel_i + n_ij * J / mass_i
vel_j_aftercollision = vel_j - n_ij * J / mass_j
self.particles[i].set_vel(vel_i_aftercollision)
self.particles[j].set_vel(vel_j_aftercollision)
# check for particle - wheel collision
def check_wheel_collision(self, i, j):
pos_i = np.array(self.particles[i].state[0:2])
pos_j = np.array(self.wheels[j].center)
dist_ij = np.sqrt(np.sum((pos_i - pos_j)**2))
radius_i = self.particles[i].radius
radius_j = self.wheels[j].radius*.7
if dist_ij > radius_i + radius_j:
return
# ensures particles do not cross wheel boundaries
dist_in = -(dist_ij - radius_j - radius_i) # distance inside of wheel
theta = math.asin((pos_i[1] - pos_j[1]) /dist_ij) #angle from centre of wheel
newPos = [(math.cos(theta) * dist_in), (math.sin(theta) * dist_in)]
# makes sure to flip new x pos to the left
if pos_i[0] < pos_j[0]:
newPos[0] *= -1
# updates the particle position
self.particles[i].set_pos([pos_i[0] + newPos[0], pos_i[1] + newPos[1]])
# May be a collision
vel_i = np.array(self.particles[i].state[2:])
vel_j = 0
relative_vel_ij = vel_i - vel_j
n_ij = normalize(pos_i - pos_j)
if np.dot(relative_vel_ij, n_ij) >= 0:
return
mass_i = self.particles[i].mass
mass_j = self.wheels[j].mass
J = -(1+self.e) * np.dot(relative_vel_ij, n_ij) / ((1./mass_i) + (1./mass_j))
vel_i_aftercollision = vel_i + n_ij * J / mass_i
self.particles[i].set_vel(vel_i_aftercollision)
# ANGULAR COLLISION #
# detect collision with lines on wheel
for x in range(len(self.wheels[j].lines)):
line = self.wheels[j].lines[x]
A = self.wheels[j].center
C = self.particles[i].state[0:2]
if A == line.topleft:
B = line.bottomright
elif A == line.bottomright:
B = line.topleft
elif A == line.topright:
B = line.bottomleft
else:
B = line.topright
dist = np.sqrt((B[0]-A[0])**2+(B[1]-A[1])**2)
Dx = (B[0]-A[0])/dist
Dy = (B[1]-A[1])/dist
t = Dx*(C[0]-A[0])+Dy*(C[1]-A[1])
Ex = t*Dx+A[0]
Ey = t*Dy+A[1]
dist2 = np.sqrt((Ex-C[0])**2+(Ey-C[1])**2)
#if (dist2 < self.particles[i].radius):
#Do conservation of momentum for angular momentum
def main():
# initializing pygame
pygame.init()
clock = pygame.time.Clock()
# top left corner is (0,0)
screen = pygame.display.set_mode((win_width, win_height))
pygame.display.set_caption('Water Wheel of Fortune')
world = World(win_height, win_width | self.particles = [x for x in self.particles if self.outside_screen(x)] | identifier_body |
particle.py | vel
return self
def update(self, dt):
self.t += dt
self.state[3] += dt * self.gravity
self.state[0] += self.state[2] * dt
self.state[1] += self.state[3] * dt
def move_by(self, delta):
self.state[0:2] = np.add(self.pos, delta)
return self
def draw(self, surface):
rect = self.image.get_rect()
rect.center = (self.state[0], win_height-self.state[1]) # Flipping y
surface.blit(self.image, rect)
def pprint(self):
print 'Particle', self.state
class Wheel(pygame.sprite.Sprite):
def __init__(self, center, radius, mass=1000):
pygame.sprite.Sprite.__init__(self)
self.state = np.zeros(4)
self.state[0:2] = np.zeros(2) # position
self.state[2] = 1 # angular velocity
self.state[3] = 0 # angular momentum
self.lines = []
self.mass = mass
self.t = 0
self.center = center
self.radius = radius
self.angle = 0
self.torque = 0
def set_vel(self, vel):
self.state[2] = vel
return self
def update(self, dt):
self.t += dt
def draw(self, surface):
self.angle += self.state[2]
for i in range(0,316, 45):
x = self.center[0] + math.cos(math.radians(self.angle + i)) * self.radius
y = self.center[1] + math.sin(math.radians(self.angle + i)) * self.radius
if (len(self.lines) <= 7):
self.lines.append(pygame.draw.line(surface, BLACK, self.center, (x,y), 5))
else:
self.lines[i/45] = pygame.draw.line(surface, BLACK, self.center, (x,y), 5)
self.circle = pygame.draw.circle(surface, BLACK, self.center, (int)(self.radius*.7), 10)
def pprint(self):
print 'Wheel', self.state
class World:
def __init__(self, height, width):
self.particles = []
self.wheels =[]
self.height = height
self.width = width
self.e = .2 # Coefficient of restitution
def | (self, imgfile, radius, mass=1.0):
particle = Particle(imgfile, radius, mass)
self.particles.append(particle)
return particle
def addWheel(self, centre, radius):
wheel = Wheel(centre, radius)
self.wheels.append(wheel)
return wheel
def pprint(self):
print '#particles', len(self.particles)
for d in self.particles:
d.pprint()
def draw(self, screen):
for d in self.particles:
d.draw(screen)
for w in self.wheels:
w.draw(screen)
def update(self, dt):
t = []
for d in self.particles:
d.update(dt)
for i in range(0, len(self.particles)):
self.check_for_collision(i)
try:
for j in range(len(self.wheels)):
t.append(threading.Thread(target=self.check_wheel_collision, args=(i, j)))
t[-1].start()
except:
print "Collision detection threading error"
for x in t:
x.join()
self.check_outside_screen()
def check_outside_screen(self):
self.particles = [x for x in self.particles if self.outside_screen(x)]
def outside_screen(self, particle):
if (particle.state[0] < -particle.radius):
return False
elif (particle.state[0] > win_width + particle.radius):
return False
elif (particle.state[1] < -particle.radius):
return False
else:
return True
# check for inter-particle collision
def check_for_collision(self, i):
if (self.particles[i].state[0] - self.particles[i].radius <= 0 or
self.particles[i].state[0] + self.particles[i].radius >= 800):
self.particles[i].state[2] *= -1*self.e
elif (self.particles[i].state[1] - self.particles[i].radius <= 0):
self.particles[i].state[3] = 0
for j in range(i+1, len(self.particles)):
if i == j:
continue
pos_i = np.array(self.particles[i].state[0:2])
pos_j = np.array(self.particles[j].state[0:2])
dist_ij = np.sqrt(np.sum((pos_i - pos_j)**2))
radius_i = self.particles[i].radius
radius_j = self.particles[j].radius
if dist_ij > radius_i + radius_j:
continue
# May be a collision
vel_i = np.array(self.particles[i].state[2:])
vel_j = np.array(self.particles[j].state[2:])
relative_vel_ij = vel_i - vel_j
n_ij = normalize(pos_i - pos_j)
if np.dot(relative_vel_ij, n_ij) >= 0:
continue
mass_i = self.particles[i].mass
mass_j = self.particles[j].mass
J = -(1+self.e) * np.dot(relative_vel_ij, n_ij) / ((1./mass_i) + (1./mass_j))
vel_i_aftercollision = vel_i + n_ij * J / mass_i
vel_j_aftercollision = vel_j - n_ij * J / mass_j
self.particles[i].set_vel(vel_i_aftercollision)
self.particles[j].set_vel(vel_j_aftercollision)
# check for particle - wheel collision
def check_wheel_collision(self, i, j):
pos_i = np.array(self.particles[i].state[0:2])
pos_j = np.array(self.wheels[j].center)
dist_ij = np.sqrt(np.sum((pos_i - pos_j)**2))
radius_i = self.particles[i].radius
radius_j = self.wheels[j].radius*.7
if dist_ij > radius_i + radius_j:
return
# ensures particles do not cross wheel boundaries
dist_in = -(dist_ij - radius_j - radius_i) # distance inside of wheel
theta = math.asin((pos_i[1] - pos_j[1]) /dist_ij) #angle from centre of wheel
newPos = [(math.cos(theta) * dist_in), (math.sin(theta) * dist_in)]
# makes sure to flip new x pos to the left
if pos_i[0] < pos_j[0]:
newPos[0] *= -1
# updates the particle position
self.particles[i].set_pos([pos_i[0] + newPos[0], pos_i[1] + newPos[1]])
# May be a collision
vel_i = np.array(self.particles[i].state[2:])
vel_j = 0
relative_vel_ij = vel_i - vel_j
n_ij = normalize(pos_i - pos_j)
if np.dot(relative_vel_ij, n_ij) >= 0:
return
mass_i = self.particles[i].mass
mass_j = self.wheels[j].mass
J = -(1+self.e) * np.dot(relative_vel_ij, n_ij) / ((1./mass_i) + (1./mass_j))
vel_i_aftercollision = vel_i + n_ij * J / mass_i
self.particles[i].set_vel(vel_i_aftercollision)
# ANGULAR COLLISION #
# detect collision with lines on wheel
for x in range(len(self.wheels[j].lines)):
line = self.wheels[j].lines[x]
A = self.wheels[j].center
C = self.particles[i].state[0:2]
if A == line.topleft:
B = line.bottomright
elif A == line.bottomright:
B = line.topleft
elif A == line.topright:
B = line.bottomleft
else:
B = line.topright
dist = np.sqrt((B[0]-A[0])**2+(B[1]-A[1])**2)
Dx = (B[0]-A[0])/dist
Dy = (B[1]-A[1])/dist
t = Dx*(C[0]-A[0])+Dy*(C[1]-A[1])
Ex = t*Dx+A[0]
Ey = t*Dy+A[1]
dist2 = np.sqrt((Ex-C[0])**2+(Ey-C[1])**2)
#if (dist2 < self.particles[i].radius):
#Do conservation of momentum for angular momentum
def main():
# initializing pygame
pygame.init()
clock = pygame.time.Clock()
# top left corner is (0,0)
screen = pygame.display.set_mode((win_width, win_height))
pygame.display.set_caption('Water Wheel of Fortune')
world = World(win_height, win_width | add | identifier_name |
peticiones_bitacora.js | "Busqueda rapida: ",
oPaginate: {
"sFirst": "Primero",
"sLast": "Último",
"sNext": "Siguiente",
"sPrevious": "Anterior"
}
},
lengthMenu: [[10, 25, 50, -1], [10, 25, 50, "Todos"]],
function(start, end, label) {
maxDateFilter = end;
minDateFilter = start;
table.draw();
},
dom: 'lBfrtip',
buttons: [
{
extend: 'excelHtml5',
text: '<span class="icon-file-excel"></span>',
titleAttr: 'Excel',
title: 'Bitacora de Rutas del Vendedor',
customize: function (xlsx) {
var sheet = xlsx.xl.worksheets['sheet1.xml'];
//All cells
$('row c', sheet).attr('s', '25');
//First row
$('row:first c', sheet).attr('s', '32');
//Second row
$('row c[r*="2"]', sheet).attr('s', '47');
//doce row
$('row c[r*="12"]', sheet).attr('s', '25');
$('row c[r*="24"]', sheet).attr('s', '25');
},
//Para que excel solo me exporte ciertas columnas
exportOptions: {
columns: [0, 1, 2, 3, 4, 5]
}
}],
//Para que el DataTable no me muestre ciertas Columnas.
columnDefs: [
{
"width": "8%",
targets: [0],
},
{
"width": "8%",
targets: [1],
},
{
"width": "8%",
targets: [2],
},
{
"width": "5%",
targets: [3],
},
{
"width": "10%",
targets: [3],
},
{
"width": "2%",
targets: [5],
render: function (data) {
return moment(data).format('DD/MM/YYYY');
}
}],
});
//creo variables despues de que el DataTable este creado en el DOM.
var tabla = $("#tb_rutasBitacora").dataTable(); //funcion jquery
var table = $("#tb_rutasBitacora").DataTable(); //funcion DataTable-libreria
//para ajustar el diseño de tabla
table.columns.adjust().draw();
function addRowDT(data) {
//DESCRIPCION : Funcion que me crea el listado de rutas en el body del DataTable
var clientRazonS;
var replaceCli;
var clientRuc;
var visita;
var motivo;
for (var i = 0; i < data.length; i++) {
replaceCli = data[i].cliente;
clientRuc = replaceCli.replace(/ /g, "");
if (clientRuc == "") {
clientRazonS = data[i].razonsocial;
}
else {
clientRazonS = data[i].cliente;
}
if (data[i].visita == 1) {
visita = "Si";
} else {
visita = "No";
}
if (data[i].motivo == "") {
motivo = "--";
} else {
motivo = data[i].motivo;
}
tabla.fnAddData([
data[i].vendedor,
data[i].descripcion,
clientRazonS,
visita,
motivo,
data[i].fecha,
data[i].coordenadaX,
data[i].coordenadaY,
]);
}
$.fn.dataTableExt.afnFiltering.push(
function(settings, data, dataIndex) {
var min = $('#min-date').val();
var minr = min.split('/').join('-');
var max = $('#max-date').val()
var maxr = max.split('/').join('-');
var createdAt = data[5] || 0;
var createdAtr = createdAt.split('/').join('-');
var createdAtf = moment(createdAtr, 'DD-MM-YYYY').format("YYYY-MM-DD");
var minf = moment(minr, 'DD-MM-YYYY').format("YYYY-MM-DD");
var maxf = moment(maxr, 'DD-MM-YYYY').format("YYYY-MM-DD");
//createdAt=createdAt.split(" ");
var startDate = moment(minf, "YYYY-MM-DD");
var endDate = moment(maxf, "YYYY-MM-DD");
var diffDate = moment(createdAtf, "YYYY-MM-DD");
//console.log(startDate);
if ((min != "" && max != "") && (endDate < startDate)) {
return true;
} else {
if ((min == "" || max == "") || (diffDate.isBetween(startDate, endDate, null, '[]'))) {
return true;
}
return false;
}
});
// Re-draw the table when the a date range filter changes
$('.date-range-filter').change(function () {
var min = $('#min-date').val();
var minr = min.split('/').join('-');
var max = $('#max-date').val()
var maxr = max.split('/').join('-');
var minf = moment(minr, 'DD-MM-YYYY').format("YYYY-MM-DD");
var maxf = moment(maxr, 'DD-MM-YYYY').format("YYYY-MM-DD");
//createdAt=createdAt.split(" ");
var startDate = moment(minf, "YYYY-MM-DD");
var endDate = moment(maxf, "YYYY-MM-DD");
if ((min != "" && max != "") && (endDate < startDate)) {
MmensajeOrdenFecha.css('opacity', '1')
.css('height', '40px');
setTimeout(function () {
MmensajeOrdenFecha.css('opacity', '0');
}, 2000);
setTimeout(function () {
MmensajeOrdenFecha.css('height', '0px')
}, 4000)
} else {
if (min != "" && max != "") {
var optionSelected = $("#vendedores").find("option:selected");
var valueSelected = optionSelected.val();
ListarBitacoras(valueSelected, MesActual, 0, startDate, endDate);
} else {
// table.draw();
}
}
});
}
function addSelectVendedor(data) {
//DESCRIPCION : Funcion para llenar la lista de opciones del select del Vendedor.
for (var i = 0; i < data.length; i++) {
vendedores.append("<option value=" + data[i]["ntraUsuario"] + ">" + data[i]["vendedor"] + "</option>");
}
}
//LLAMANDO A LA FUNCIO NDE AJAX VENDEDORES
function ListarVendedores() {
//DESCRIPCION : Funcion que me trae la lista de vendedores
$.ajax({
type: "POST",
url: "frmRutasAsignadas.aspx/ListarVendedores",
data: "{'flag': '1' }",
contentType: 'application/json; charset=utf-8',
error: function (xhr, ajaxOtions, thrownError) {
console.log(xhr.status + "\n" + xhr.responseText, "\n" + thrownError);
},
success: function (data) {
addSelectVendedor(data.d);
}
})
}
ListarVendedores();
function ListarBitacoras(idVendedor, Mes, flagFiltro,fechaIncio,fechaFin) {
//DESCRICION : Funcion que me obtiene la lista de bitacoras
var json = JSON.stringify({
codVendedor: idVendedor,
fechaActual: Mes,
flagFiltro: flagFiltro,
fechaIncio: fechaIncio,
fechaFin: fechaFin
});
$.ajax({
type: "POST",
url: "frmRutasBitacora.aspx/ListarRutasBitacora",
data: json,
contentType: 'application/json; charset=utf-8',
error: function (xhr, ajaxOtions, thrownError) {
console.log(xhr.status + "\n" + xhr.responseText, "\n" + thrownError);
},
success: function (data) {
console.log(data.d);
table.clear().draw();
addRowDT(data.d);
}
})
}
function SeleccionarVendedor() {
| //DESCRICION : Funcion que me obtiene la lista de rutas asignadas segun el vendedor seleccionado.
vendedores.change(function () {
var optionSelected = $(this).find("option:selected");
var valueSelected = optionSelected.val();
ListarBitacoras(valueSelected, MesActual, 1, fechaActualIncio, fechaActualIncio);
if (valueSelected > 0) {
rangoFecha.css("display", "block")
} else {
rangoFecha.css("display", "none");
}
});
}
| identifier_body |
|
peticiones_bitacora.js | Names: ['Enero', 'Febrero', 'Marzo', 'Abril', 'Mayo', 'Junio',
'Julio', 'Agosto', 'Septiembre', 'Octubre', 'Noviembre', 'Diciembre'
],
monthNamesShort: ['Ene', 'Feb', 'Mar', 'Abr', 'May', 'Jun',
'Jul', 'Ago', 'Sep', 'Oct', 'Nov', 'Dic'
],
dayNames: ['Domingo', 'Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes', 'Sábado'],
dayNamesShort: ['Dom', 'Lun', 'Mar', 'Mié', 'Juv', 'Vie', 'Sáb'],
dayNamesMin: ['Do', 'Lu', 'Ma', 'Mi', 'Ju', 'Vi', 'Sá'],
weekHeader: 'Sm',
dateFormat: 'dd/mm/yy',
showOn: "button",
buttonImage: "Imagenes/calendar.gif",
buttonImageOnly: true,
buttonText: "Select date",
language: "es"
}
);
$('#tb_rutasBitacora').DataTable({
language: {
lengthMenu: "Mostrar _MENU_ registros",
zeroRecords: "No hay registros",
info: "Mostrando la página _PAGE_ de _PAGES_",
infoEmpty: "No hay registros disponibles.",
infoFiltered: "(filtered from _MAX_ total records)",
search: "Busqueda rapida: ",
oPaginate: {
"sFirst": "Primero",
"sLast": "Último",
"sNext": "Siguiente",
"sPrevious": "Anterior"
}
},
lengthMenu: [[10, 25, 50, -1], [10, 25, 50, "Todos"]],
function(start, end, label) {
maxDateFilter = end;
minDateFilter = start;
table.draw();
},
dom: 'lBfrtip',
buttons: [
{
extend: 'excelHtml5',
text: '<span class="icon-file-excel"></span>',
titleAttr: 'Excel',
title: 'Bitacora de Rutas del Vendedor',
customize: function (xlsx) {
var sheet = xlsx.xl.worksheets['sheet1.xml'];
//All cells
$('row c', sheet).attr('s', '25');
//First row
$('row:first c', sheet).attr('s', '32');
//Second row
$('row c[r*="2"]', sheet).attr('s', '47');
//doce row
$('row c[r*="12"]', sheet).attr('s', '25');
$('row c[r*="24"]', sheet).attr('s', '25');
},
//Para que excel solo me exporte ciertas columnas
exportOptions: {
columns: [0, 1, 2, 3, 4, 5]
}
}],
//Para que el DataTable no me muestre ciertas Columnas.
columnDefs: [
{
"width": "8%",
targets: [0],
},
{
"width": "8%",
targets: [1],
},
{
"width": "8%",
targets: [2],
},
{
"width": "5%",
targets: [3],
},
{
"width": "10%",
targets: [3],
},
{
"width": "2%",
targets: [5],
render: function (data) {
return moment(data).format('DD/MM/YYYY');
}
}],
});
//creo variables despues de que el DataTable este creado en el DOM.
var tabla = $("#tb_rutasBitacora").dataTable(); //funcion jquery
var table = $("#tb_rutasBitacora").DataTable(); //funcion DataTable-libreria
//para ajustar el diseño de tabla
table.columns.adjust().draw();
function addRowDT(data) {
//DESCRIPCION : Funcion que me crea el listado de rutas en el body del DataTable
var clientRazonS;
var replaceCli;
var clientRuc;
var visita;
var motivo;
for (var i = 0; i < data.length; i++) {
replaceCli = data[i].cliente;
clientRuc = replaceCli.replace(/ /g, "");
if (clientRuc == "") {
clientRazonS = data[i].razonsocial;
}
else {
clientRazonS = data[i].cliente;
}
if (data[i].visita == 1) {
visita = "Si";
} else {
visita = "No";
}
if (data[i].motivo == "") {
motivo = "--";
} else {
motivo = data[i].motivo;
}
tabla.fnAddData([
data[i].vendedor,
data[i].descripcion,
clientRazonS,
visita,
motivo,
data[i].fecha,
data[i].coordenadaX,
data[i].coordenadaY,
]);
}
$.fn.dataTableExt.afnFiltering.push(
function(settings, data, dataIndex) {
var min = $('#min-date').val();
var minr = min.split('/').join('-');
var max = $('#max-date').val()
var maxr = max.split('/').join('-');
var createdAt = data[5] || 0;
var createdAtr = createdAt.split('/').join('-');
var createdAtf = moment(createdAtr, 'DD-MM-YYYY').format("YYYY-MM-DD");
var minf = moment(minr, 'DD-MM-YYYY').format("YYYY-MM-DD");
var maxf = moment(maxr, 'DD-MM-YYYY').format("YYYY-MM-DD");
//createdAt=createdAt.split(" ");
var startDate = moment(minf, "YYYY-MM-DD");
var endDate = moment(maxf, "YYYY-MM-DD");
var diffDate = moment(createdAtf, "YYYY-MM-DD");
//console.log(startDate);
if ((min != "" && max != "") && (endDate < startDate)) {
return true;
} else {
if ((min == "" || max == "") || (diffDate.isBetween(startDate, endDate, null, '[]'))) {
return true;
}
return false;
}
});
// Re-draw the table when the a date range filter changes
$('.date-range-filter').change(function () {
var min = $('#min-date').val();
var minr = min.split('/').join('-');
var max = $('#max-date').val()
var maxr = max.split('/').join('-');
var minf = moment(minr, 'DD-MM-YYYY').format("YYYY-MM-DD");
var maxf = moment(maxr, 'DD-MM-YYYY').format("YYYY-MM-DD");
//createdAt=createdAt.split(" ");
var startDate = moment(minf, "YYYY-MM-DD");
var endDate = moment(maxf, "YYYY-MM-DD");
if ((min != "" && max != "") && (endDate < startDate)) {
MmensajeOrdenFecha.css('opacity', '1')
.css('height', '40px');
setTimeout(function () {
MmensajeOrdenFecha.css('opacity', '0');
}, 2000);
setTimeout(function () {
MmensajeOrdenFecha.css('height', '0px')
}, 4000)
} else {
if (min != "" && max != "") {
var optionSelected = $("#vendedores").find("option:selected");
var valueSelected = optionSelected.val();
ListarBitacoras(valueSelected, MesActual, 0, startDate, endDate);
} else {
// table.draw();
}
}
});
}
function addSelectVendedor(data) {
//DESCRIPCION : Funcion para llenar la lista de opciones del select del Vendedor.
for (var i = 0; i < data.length; i++) {
vendedores.append("<option value=" + data[i]["ntraUsuario"] + ">" + data[i]["vendedor"] + "</option>");
}
}
//LLAMANDO A LA FUNCIO NDE AJAX VENDEDORES
function ListarVendedores() {
//DESCRIPCION : Funcion que me trae la lista de vendedores
$.ajax({
type: "POST",
url: "frmRutasAsignadas.aspx/ListarVendedores",
data: "{'flag': '1' }",
contentType: 'application/json; charset=utf-8',
error: function (xhr, ajaxOtions, thrownError) {
console.log(xhr.status + "\n" + xhr.responseText, "\n" + thrownError);
},
success: function (data) {
addSelectVendedor(data.d);
}
})
}
ListarVendedores();
function Lista | rBitacoras(idVe | identifier_name |
|
peticiones_bitacora.js | dayNames: ['Domingo', 'Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes', 'Sábado'],
dayNamesShort: ['Dom', 'Lun', 'Mar', 'Mié', 'Juv', 'Vie', 'Sáb'],
dayNamesMin: ['Do', 'Lu', 'Ma', 'Mi', 'Ju', 'Vi', 'Sá'],
weekHeader: 'Sm',
dateFormat: 'dd/mm/yy',
showOn: "button",
buttonImage: "Imagenes/calendar.gif",
buttonImageOnly: true,
buttonText: "Select date",
language : "es"
}
);
$("#max-date").datepicker(
{
changeMonth: true,
changeYear: true,
monthNames: ['Enero', 'Febrero', 'Marzo', 'Abril', 'Mayo', 'Junio',
'Julio', 'Agosto', 'Septiembre', 'Octubre', 'Noviembre', 'Diciembre'
],
monthNamesShort: ['Ene', 'Feb', 'Mar', 'Abr', 'May', 'Jun',
'Jul', 'Ago', 'Sep', 'Oct', 'Nov', 'Dic'
],
dayNames: ['Domingo', 'Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes', 'Sábado'],
dayNamesShort: ['Dom', 'Lun', 'Mar', 'Mié', 'Juv', 'Vie', 'Sáb'],
dayNamesMin: ['Do', 'Lu', 'Ma', 'Mi', 'Ju', 'Vi', 'Sá'],
weekHeader: 'Sm',
dateFormat: 'dd/mm/yy',
showOn: "button",
buttonImage: "Imagenes/calendar.gif",
buttonImageOnly: true,
buttonText: "Select date",
language: "es"
}
);
$('#tb_rutasBitacora').DataTable({
language: {
lengthMenu: "Mostrar _MENU_ registros",
zeroRecords: "No hay registros",
info: "Mostrando la página _PAGE_ de _PAGES_",
infoEmpty: "No hay registros disponibles.",
infoFiltered: "(filtered from _MAX_ total records)",
search: "Busqueda rapida: ",
oPaginate: {
"sFirst": "Primero",
"sLast": "Último",
"sNext": "Siguiente",
"sPrevious": "Anterior"
}
},
lengthMenu: [[10, 25, 50, -1], [10, 25, 50, "Todos"]],
function(start, end, label) {
maxDateFilter = end;
minDateFilter = start;
table.draw();
},
dom: 'lBfrtip',
buttons: [
{
extend: 'excelHtml5',
text: '<span class="icon-file-excel"></span>',
titleAttr: 'Excel',
title: 'Bitacora de Rutas del Vendedor',
customize: function (xlsx) {
var sheet = xlsx.xl.worksheets['sheet1.xml'];
//All cells
$('row c', sheet).attr('s', '25');
//First row
$('row:first c', sheet).attr('s', '32');
//Second row
$('row c[r*="2"]', sheet).attr('s', '47');
//doce row
$('row c[r*="12"]', sheet).attr('s', '25');
$('row c[r*="24"]', sheet).attr('s', '25');
},
//Para que excel solo me exporte ciertas columnas
exportOptions: {
columns: [0, 1, 2, 3, 4, 5]
}
}],
//Para que el DataTable no me muestre ciertas Columnas.
columnDefs: [
{
"width": "8%",
targets: [0],
},
{
"width": "8%",
targets: [1],
},
{
"width": "8%",
targets: [2],
},
{
"width": "5%",
targets: [3],
},
{
"width": "10%",
targets: [3],
},
{
"width": "2%",
targets: [5],
render: function (data) {
return moment(data).format('DD/MM/YYYY');
}
}],
});
//creo variables despues de que el DataTable este creado en el DOM.
var tabla = $("#tb_rutasBitacora").dataTable(); //funcion jquery
var table = $("#tb_rutasBitacora").DataTable(); //funcion DataTable-libreria
//para ajustar el diseño de tabla
table.columns.adjust().draw();
function addRowDT(data) {
//DESCRIPCION : Funcion que me crea el listado de rutas en el body del DataTable
var clientRazonS;
var replaceCli;
var clientRuc;
var visita;
var motivo;
for (var i = 0; i < data.length; i++) {
| motivo = "--";
} else {
motivo = data[i].motivo;
}
tabla.fnAddData([
data[i].vendedor,
data[i].descripcion,
clientRazonS,
visita,
motivo,
data[i].fecha,
data[i].coordenadaX,
data[i].coordenadaY,
]);
}
$.fn.dataTableExt.afnFiltering.push(
function(settings, data, dataIndex) {
var min = $('#min-date').val();
var minr = min.split('/').join('-');
var max = $('#max-date').val()
var maxr = max.split('/').join('-');
var createdAt = data[5] || 0;
var createdAtr = createdAt.split('/').join('-');
var createdAtf = moment(createdAtr, 'DD-MM-YYYY').format("YYYY-MM-DD");
var minf = moment(minr, 'DD-MM-YYYY').format("YYYY-MM-DD");
var maxf = moment(maxr, 'DD-MM-YYYY').format("YYYY-MM-DD");
//createdAt=createdAt.split(" ");
var startDate = moment(minf, "YYYY-MM-DD");
var endDate = moment(maxf, "YYYY-MM-DD");
var diffDate = moment(createdAtf, "YYYY-MM-DD");
//console.log(startDate);
if ((min != "" && max != "") && (endDate < startDate)) {
return true;
} else {
if ((min == "" || max == "") || (diffDate.isBetween(startDate, endDate, null, '[]'))) {
return true;
}
return false;
}
});
// Re-draw the table when the a date range filter changes
$('.date-range-filter').change(function () {
var min = $('#min-date').val();
var minr = min.split('/').join('-');
var max = $('#max-date').val()
var maxr = max.split('/').join('-');
var minf = moment(minr, 'DD-MM-YYYY').format("YYYY-MM-DD");
var maxf = moment(maxr, 'DD-MM-YYYY').format("YYYY-MM-DD");
//createdAt=createdAt.split(" ");
var startDate = moment(minf, "YYYY-MM-DD");
var endDate = moment(maxf, "YYYY-MM-DD");
if ((min != "" && max != "") && (endDate < startDate)) {
MmensajeOrdenFecha.css('opacity', '1')
.css('height', '40px');
setTimeout(function () {
MmensajeOrdenFecha.css('opacity', '0');
}, 2000);
setTimeout(function () {
MmensajeOrdenFecha.css('height', '0px')
}, 4000)
} else {
if (min != "" && max != "") {
var optionSelected = $("#vendedores").find("option:selected");
var valueSelected = optionSelected.val();
ListarBitacoras(valueSelected, MesActual, 0, startDate, endDate);
} else {
// table.draw();
}
}
});
}
function addSelectVendedor(data) {
//DESCRIPCION : Funcion para llenar la lista de opciones del select del Vendedor.
for (var i = 0; i < data.length; i++) {
vendedores.append("<option value=" + data[i]["ntraUsuario"] + ">" + data[i]["vendedor"] + | replaceCli = data[i].cliente;
clientRuc = replaceCli.replace(/ /g, "");
if (clientRuc == "") {
clientRazonS = data[i].razonsocial;
}
else {
clientRazonS = data[i].cliente;
}
if (data[i].visita == 1) {
visita = "Si";
} else {
visita = "No";
}
if (data[i].motivo == "") { | conditional_block |
peticiones_bitacora.js | dayNames: ['Domingo', 'Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes', 'Sábado'],
dayNamesShort: ['Dom', 'Lun', 'Mar', 'Mié', 'Juv', 'Vie', 'Sáb'],
dayNamesMin: ['Do', 'Lu', 'Ma', 'Mi', 'Ju', 'Vi', 'Sá'],
weekHeader: 'Sm',
dateFormat: 'dd/mm/yy',
showOn: "button",
buttonImage: "Imagenes/calendar.gif",
buttonImageOnly: true,
buttonText: "Select date",
language : "es"
}
);
$("#max-date").datepicker(
{
changeMonth: true,
changeYear: true,
monthNames: ['Enero', 'Febrero', 'Marzo', 'Abril', 'Mayo', 'Junio',
'Julio', 'Agosto', 'Septiembre', 'Octubre', 'Noviembre', 'Diciembre'
],
monthNamesShort: ['Ene', 'Feb', 'Mar', 'Abr', 'May', 'Jun',
'Jul', 'Ago', 'Sep', 'Oct', 'Nov', 'Dic'
],
dayNames: ['Domingo', 'Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes', 'Sábado'],
dayNamesShort: ['Dom', 'Lun', 'Mar', 'Mié', 'Juv', 'Vie', 'Sáb'],
dayNamesMin: ['Do', 'Lu', 'Ma', 'Mi', 'Ju', 'Vi', 'Sá'],
weekHeader: 'Sm',
dateFormat: 'dd/mm/yy',
showOn: "button",
buttonImage: "Imagenes/calendar.gif",
buttonImageOnly: true,
buttonText: "Select date",
language: "es"
}
);
$('#tb_rutasBitacora').DataTable({
language: {
lengthMenu: "Mostrar _MENU_ registros",
zeroRecords: "No hay registros",
info: "Mostrando la página _PAGE_ de _PAGES_",
infoEmpty: "No hay registros disponibles.",
infoFiltered: "(filtered from _MAX_ total records)",
search: "Busqueda rapida: ",
oPaginate: {
"sFirst": "Primero",
"sLast": "Último",
"sNext": "Siguiente",
"sPrevious": "Anterior"
}
},
lengthMenu: [[10, 25, 50, -1], [10, 25, 50, "Todos"]],
function(start, end, label) {
maxDateFilter = end;
minDateFilter = start;
table.draw();
},
dom: 'lBfrtip',
buttons: [
{
extend: 'excelHtml5',
text: '<span class="icon-file-excel"></span>',
titleAttr: 'Excel',
title: 'Bitacora de Rutas del Vendedor',
customize: function (xlsx) {
var sheet = xlsx.xl.worksheets['sheet1.xml'];
//All cells
$('row c', sheet).attr('s', '25');
//First row
$('row:first c', sheet).attr('s', '32');
//Second row
$('row c[r*="2"]', sheet).attr('s', '47');
//doce row
$('row c[r*="12"]', sheet).attr('s', '25');
$('row c[r*="24"]', sheet).attr('s', '25');
},
//Para que excel solo me exporte ciertas columnas
exportOptions: {
columns: [0, 1, 2, 3, 4, 5]
}
}],
//Para que el DataTable no me muestre ciertas Columnas.
columnDefs: [
{
"width": "8%",
targets: [0],
},
{
"width": "8%",
targets: [1],
},
{
"width": "8%",
targets: [2],
},
{ |
},
{
"width": "10%",
targets: [3],
},
{
"width": "2%",
targets: [5],
render: function (data) {
return moment(data).format('DD/MM/YYYY');
}
}],
});
//creo variables despues de que el DataTable este creado en el DOM.
var tabla = $("#tb_rutasBitacora").dataTable(); //funcion jquery
var table = $("#tb_rutasBitacora").DataTable(); //funcion DataTable-libreria
//para ajustar el diseño de tabla
table.columns.adjust().draw();
function addRowDT(data) {
//DESCRIPCION : Funcion que me crea el listado de rutas en el body del DataTable
var clientRazonS;
var replaceCli;
var clientRuc;
var visita;
var motivo;
for (var i = 0; i < data.length; i++) {
replaceCli = data[i].cliente;
clientRuc = replaceCli.replace(/ /g, "");
if (clientRuc == "") {
clientRazonS = data[i].razonsocial;
}
else {
clientRazonS = data[i].cliente;
}
if (data[i].visita == 1) {
visita = "Si";
} else {
visita = "No";
}
if (data[i].motivo == "") {
motivo = "--";
} else {
motivo = data[i].motivo;
}
tabla.fnAddData([
data[i].vendedor,
data[i].descripcion,
clientRazonS,
visita,
motivo,
data[i].fecha,
data[i].coordenadaX,
data[i].coordenadaY,
]);
}
$.fn.dataTableExt.afnFiltering.push(
function(settings, data, dataIndex) {
var min = $('#min-date').val();
var minr = min.split('/').join('-');
var max = $('#max-date').val()
var maxr = max.split('/').join('-');
var createdAt = data[5] || 0;
var createdAtr = createdAt.split('/').join('-');
var createdAtf = moment(createdAtr, 'DD-MM-YYYY').format("YYYY-MM-DD");
var minf = moment(minr, 'DD-MM-YYYY').format("YYYY-MM-DD");
var maxf = moment(maxr, 'DD-MM-YYYY').format("YYYY-MM-DD");
//createdAt=createdAt.split(" ");
var startDate = moment(minf, "YYYY-MM-DD");
var endDate = moment(maxf, "YYYY-MM-DD");
var diffDate = moment(createdAtf, "YYYY-MM-DD");
//console.log(startDate);
if ((min != "" && max != "") && (endDate < startDate)) {
return true;
} else {
if ((min == "" || max == "") || (diffDate.isBetween(startDate, endDate, null, '[]'))) {
return true;
}
return false;
}
});
// Re-draw the table when the a date range filter changes
$('.date-range-filter').change(function () {
var min = $('#min-date').val();
var minr = min.split('/').join('-');
var max = $('#max-date').val()
var maxr = max.split('/').join('-');
var minf = moment(minr, 'DD-MM-YYYY').format("YYYY-MM-DD");
var maxf = moment(maxr, 'DD-MM-YYYY').format("YYYY-MM-DD");
//createdAt=createdAt.split(" ");
var startDate = moment(minf, "YYYY-MM-DD");
var endDate = moment(maxf, "YYYY-MM-DD");
if ((min != "" && max != "") && (endDate < startDate)) {
MmensajeOrdenFecha.css('opacity', '1')
.css('height', '40px');
setTimeout(function () {
MmensajeOrdenFecha.css('opacity', '0');
}, 2000);
setTimeout(function () {
MmensajeOrdenFecha.css('height', '0px')
}, 4000)
} else {
if (min != "" && max != "") {
var optionSelected = $("#vendedores").find("option:selected");
var valueSelected = optionSelected.val();
ListarBitacoras(valueSelected, MesActual, 0, startDate, endDate);
} else {
// table.draw();
}
}
});
}
function addSelectVendedor(data) {
//DESCRIPCION : Funcion para llenar la lista de opciones del select del Vendedor.
for (var i = 0; i < data.length; i++) {
vendedores.append("<option value=" + data[i]["ntraUsuario"] + ">" + data[i]["vendedor"] + "</ | "width": "5%",
targets: [3], | random_line_split |
lib.rs | .0, // Top right
-1.0, 1.0, 0.0, // Top left
];
fn window() -> web_sys::Window {
web_sys::window().expect("no global `window` exists")
}
fn request_animation_frame(f: &Closure<dyn FnMut()>) {
window()
.request_animation_frame(f.as_ref().unchecked_ref())
.expect("should register `requestAnimationFrame` OK");
}
#[wasm_bindgen(start)]
pub fn start() {
utils::set_panic_hook();
log!("Hello there! Compositor canvas starting/loading");
}
#[wasm_bindgen]
pub fn initialise(element_id: String) -> Result<(), JsValue> {
log!(
"Compositor canvas (element_id: String = `{}`) initialisation",
&element_id
);
let document = web_sys::window().unwrap().document().unwrap();
let canvas = document.get_element_by_id(&element_id).unwrap();
let canvas: web_sys::HtmlCanvasElement = canvas.dyn_into::<web_sys::HtmlCanvasElement>()?;
let context = canvas
.get_context("webgl")?
.unwrap()
.dyn_into::<WebGlRenderingContext>()?;
let vert_shader = compile_shader(
&context,
WebGlRenderingContext::VERTEX_SHADER,
r#"
attribute vec4 position;
attribute vec2 textureCoord;
varying highp vec2 vTextureCoord;
void main(void) {
gl_Position = position;
vTextureCoord = textureCoord;
}
"#,
)?;
let frag_shader = compile_shader(
&context,
WebGlRenderingContext::FRAGMENT_SHADER,
r#"
varying highp vec2 vTextureCoord;
uniform sampler2D image;
void main(void) {
gl_FragColor = texture2D(image, vTextureCoord);
gl_FragColor = vec4(gl_FragColor.b, gl_FragColor.g, gl_FragColor.r, gl_FragColor.a);
}
"#,
)?;
let program = link_program(&context, &vert_shader, &frag_shader)?;
let position_location = context.get_attrib_location(&program, "position");
let texcoord_location = context.get_attrib_location(&program, "textureCoord");
let texture_location = context.get_uniform_location(&program, "image"); //.unwrap();
// Bind shader
context.use_program(Some(&program));
// Build model
let vertex_buffer = context
.create_buffer()
.ok_or("failed to create vertex buffer")?;
context.bind_buffer(WebGlRenderingContext::ARRAY_BUFFER, Some(&vertex_buffer));
// Note that `Float32Array::view` is somewhat dangerous (hence the
// `unsafe`!). This is creating a raw view into our module's
// `WebAssembly.Memory` buffer, but if we allocate more pages for ourself
// (aka do a memory allocation in Rust) it'll cause the buffer to change,
// causing the `Float32Array` to be invalid.
//
// As a result, after `Float32Array::view` we have to be very careful not to
// do any memory allocations before it's dropped.
unsafe {
let vert_array = js_sys::Float32Array::view(&VERTICES);
context.buffer_data_with_array_buffer_view(
WebGlRenderingContext::ARRAY_BUFFER,
&vert_array,
WebGlRenderingContext::STATIC_DRAW,
);
}
context.vertex_attrib_pointer_with_i32(
position_location as u32,
3,
WebGlRenderingContext::FLOAT,
false,
0,
0,
);
context.enable_vertex_attrib_array(position_location as u32);
// Add uvs
let uvs: [f32; 12] = [
0.0, 1.0, // Bottom left
1.0, 1.0, // Bottom right
1.0, 0.0, // Top right
0.0, 1.0, // Bottom left
1.0, 0.0, // Top right
0.0, 0.0, // Top left
];
let uv_buffer = context
.create_buffer()
.ok_or("failed to create uv buffer")?;
context.bind_buffer(WebGlRenderingContext::ARRAY_BUFFER, Some(&uv_buffer));
// Note that `Float32Array::view` is somewhat dangerous (hence the
// `unsafe`!). This is creating a raw view into our module's
// `WebAssembly.Memory` buffer, but if we allocate more pages for ourself
// (aka do a memory allocation in Rust) it'll cause the buffer to change,
// causing the `Float32Array` to be invalid.
//
// As a result, after `Float32Array::view` we have to be very careful not to
// do any memory allocations before it's dropped.
unsafe {
let uv_array = js_sys::Float32Array::view(&uvs);
context.buffer_data_with_array_buffer_view(
WebGlRenderingContext::ARRAY_BUFFER,
&uv_array,
WebGlRenderingContext::STATIC_DRAW,
);
}
context.vertex_attrib_pointer_with_i32(
texcoord_location as u32,
2,
WebGlRenderingContext::FLOAT,
false,
0,
0,
);
context.enable_vertex_attrib_array(texcoord_location as u32);
// Create a texture
let texture = context.create_texture();
context.bind_texture(WebGlRenderingContext::TEXTURE_2D, texture.as_ref());
unsafe {
context
.tex_image_2d_with_i32_and_i32_and_i32_and_format_and_type_and_opt_u8_array(
//context.tex_image_2d_with_i32_and_i32_and_i32_and_format_and_type_and_opt_array_buffer_view(
WebGlRenderingContext::TEXTURE_2D,
0,
WebGlRenderingContext::RGBA as i32,
WIDTH,
HEIGHT,
0,
WebGlRenderingContext::RGBA,
WebGlRenderingContext::UNSIGNED_BYTE,
Some(&PIXEL_DATA),
)
.expect("should create GPU memory OK");
}
context.generate_mipmap(WebGlRenderingContext::TEXTURE_2D);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_WRAP_S,
WebGlRenderingContext::CLAMP_TO_EDGE as i32,
);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_WRAP_T,
WebGlRenderingContext::CLAMP_TO_EDGE as i32,
);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_MAG_FILTER,
WebGlRenderingContext::LINEAR as i32,
);
context.uniform1i(Some(texture_location.unwrap().as_ref()), 0);
// draw()
context.clear_color(0.0, 0.0, 0.0, 1.0);
context.clear(WebGlRenderingContext::COLOR_BUFFER_BIT);
context.draw_arrays(
WebGlRenderingContext::TRIANGLES,
0,
(VERTICES.len() / 3) as i32,
);
input_data_update_loop(context, texture.unwrap());
// Fin
Ok(())
}
pub fn input_data_update_loop(gl: WebGlRenderingContext, texture: web_sys::WebGlTexture) {
let f = Rc::new(RefCell::new(None));
let g = f.clone();
{
*g.borrow_mut() = Some(Closure::wrap(Box::new(move || {
gl.bind_texture(WebGlRenderingContext::TEXTURE_2D, Some(&texture));
unsafe {
if PIXEL_DATA_UPDATED == true {
gl.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_u8_array(
WebGlRenderingContext::TEXTURE_2D,
0,
0,
0,
WIDTH,
HEIGHT,
WebGlRenderingContext::RGBA,
WebGlRenderingContext::UNSIGNED_BYTE,
Some(&PIXEL_DATA),
)
.expect("should update GPU memory OK");
PIXEL_DATA_UPDATED = false;
}
}
gl.clear_color(0.0, 0.0, 0.0, 1.0);
gl.clear(WebGlRenderingContext::COLOR_BUFFER_BIT);
gl.draw_arrays(
WebGlRenderingContext::TRIANGLES,
0,
(VERTICES.len() / 3) as i32,
);
//update_texture_and_draw(gl, texture, texture_location);
request_animation_frame(f.borrow().as_ref().unwrap());
}) as Box<dyn FnMut()>));
}
request_animation_frame(g.borrow().as_ref().unwrap());
}
pub fn compile_shader(
context: &WebGlRenderingContext,
shader_type: u32,
source: &str,
) -> Result<WebGlShader, String> {
let shader = context
.create_shader(shader_type)
.ok_or_else(|| String::from("Unable to create shader object"))?;
context.shader_source(&shader, source);
context.compile_shader(&shader);
if context
.get_shader_parameter(&shader, WebGlRenderingContext::COMPILE_STATUS)
.as_bool()
.unwrap_or(false)
| {
Ok(shader)
} | conditional_block |
|
lib.rs | 8] = [
-1.0, -1.0, 0.0, // Bottom left
1.0, -1.0, 0.0, // Bottom right
1.0, 1.0, 0.0, // Top right
-1.0, -1.0, 0.0, // Bottom left
1.0, 1.0, 0.0, // Top right
-1.0, 1.0, 0.0, // Top left
];
fn window() -> web_sys::Window {
web_sys::window().expect("no global `window` exists")
}
fn request_animation_frame(f: &Closure<dyn FnMut()>) {
window()
.request_animation_frame(f.as_ref().unchecked_ref()) | pub fn start() {
utils::set_panic_hook();
log!("Hello there! Compositor canvas starting/loading");
}
#[wasm_bindgen]
pub fn initialise(element_id: String) -> Result<(), JsValue> {
log!(
"Compositor canvas (element_id: String = `{}`) initialisation",
&element_id
);
let document = web_sys::window().unwrap().document().unwrap();
let canvas = document.get_element_by_id(&element_id).unwrap();
let canvas: web_sys::HtmlCanvasElement = canvas.dyn_into::<web_sys::HtmlCanvasElement>()?;
let context = canvas
.get_context("webgl")?
.unwrap()
.dyn_into::<WebGlRenderingContext>()?;
let vert_shader = compile_shader(
&context,
WebGlRenderingContext::VERTEX_SHADER,
r#"
attribute vec4 position;
attribute vec2 textureCoord;
varying highp vec2 vTextureCoord;
void main(void) {
gl_Position = position;
vTextureCoord = textureCoord;
}
"#,
)?;
let frag_shader = compile_shader(
&context,
WebGlRenderingContext::FRAGMENT_SHADER,
r#"
varying highp vec2 vTextureCoord;
uniform sampler2D image;
void main(void) {
gl_FragColor = texture2D(image, vTextureCoord);
gl_FragColor = vec4(gl_FragColor.b, gl_FragColor.g, gl_FragColor.r, gl_FragColor.a);
}
"#,
)?;
let program = link_program(&context, &vert_shader, &frag_shader)?;
let position_location = context.get_attrib_location(&program, "position");
let texcoord_location = context.get_attrib_location(&program, "textureCoord");
let texture_location = context.get_uniform_location(&program, "image"); //.unwrap();
// Bind shader
context.use_program(Some(&program));
// Build model
let vertex_buffer = context
.create_buffer()
.ok_or("failed to create vertex buffer")?;
context.bind_buffer(WebGlRenderingContext::ARRAY_BUFFER, Some(&vertex_buffer));
// Note that `Float32Array::view` is somewhat dangerous (hence the
// `unsafe`!). This is creating a raw view into our module's
// `WebAssembly.Memory` buffer, but if we allocate more pages for ourself
// (aka do a memory allocation in Rust) it'll cause the buffer to change,
// causing the `Float32Array` to be invalid.
//
// As a result, after `Float32Array::view` we have to be very careful not to
// do any memory allocations before it's dropped.
unsafe {
let vert_array = js_sys::Float32Array::view(&VERTICES);
context.buffer_data_with_array_buffer_view(
WebGlRenderingContext::ARRAY_BUFFER,
&vert_array,
WebGlRenderingContext::STATIC_DRAW,
);
}
context.vertex_attrib_pointer_with_i32(
position_location as u32,
3,
WebGlRenderingContext::FLOAT,
false,
0,
0,
);
context.enable_vertex_attrib_array(position_location as u32);
// Add uvs
let uvs: [f32; 12] = [
0.0, 1.0, // Bottom left
1.0, 1.0, // Bottom right
1.0, 0.0, // Top right
0.0, 1.0, // Bottom left
1.0, 0.0, // Top right
0.0, 0.0, // Top left
];
let uv_buffer = context
.create_buffer()
.ok_or("failed to create uv buffer")?;
context.bind_buffer(WebGlRenderingContext::ARRAY_BUFFER, Some(&uv_buffer));
// Note that `Float32Array::view` is somewhat dangerous (hence the
// `unsafe`!). This is creating a raw view into our module's
// `WebAssembly.Memory` buffer, but if we allocate more pages for ourself
// (aka do a memory allocation in Rust) it'll cause the buffer to change,
// causing the `Float32Array` to be invalid.
//
// As a result, after `Float32Array::view` we have to be very careful not to
// do any memory allocations before it's dropped.
unsafe {
let uv_array = js_sys::Float32Array::view(&uvs);
context.buffer_data_with_array_buffer_view(
WebGlRenderingContext::ARRAY_BUFFER,
&uv_array,
WebGlRenderingContext::STATIC_DRAW,
);
}
context.vertex_attrib_pointer_with_i32(
texcoord_location as u32,
2,
WebGlRenderingContext::FLOAT,
false,
0,
0,
);
context.enable_vertex_attrib_array(texcoord_location as u32);
// Create a texture
let texture = context.create_texture();
context.bind_texture(WebGlRenderingContext::TEXTURE_2D, texture.as_ref());
unsafe {
context
.tex_image_2d_with_i32_and_i32_and_i32_and_format_and_type_and_opt_u8_array(
//context.tex_image_2d_with_i32_and_i32_and_i32_and_format_and_type_and_opt_array_buffer_view(
WebGlRenderingContext::TEXTURE_2D,
0,
WebGlRenderingContext::RGBA as i32,
WIDTH,
HEIGHT,
0,
WebGlRenderingContext::RGBA,
WebGlRenderingContext::UNSIGNED_BYTE,
Some(&PIXEL_DATA),
)
.expect("should create GPU memory OK");
}
context.generate_mipmap(WebGlRenderingContext::TEXTURE_2D);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_WRAP_S,
WebGlRenderingContext::CLAMP_TO_EDGE as i32,
);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_WRAP_T,
WebGlRenderingContext::CLAMP_TO_EDGE as i32,
);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_MAG_FILTER,
WebGlRenderingContext::LINEAR as i32,
);
context.uniform1i(Some(texture_location.unwrap().as_ref()), 0);
// draw()
context.clear_color(0.0, 0.0, 0.0, 1.0);
context.clear(WebGlRenderingContext::COLOR_BUFFER_BIT);
context.draw_arrays(
WebGlRenderingContext::TRIANGLES,
0,
(VERTICES.len() / 3) as i32,
);
input_data_update_loop(context, texture.unwrap());
// Fin
Ok(())
}
pub fn input_data_update_loop(gl: WebGlRenderingContext, texture: web_sys::WebGlTexture) {
let f = Rc::new(RefCell::new(None));
let g = f.clone();
{
*g.borrow_mut() = Some(Closure::wrap(Box::new(move || {
gl.bind_texture(WebGlRenderingContext::TEXTURE_2D, Some(&texture));
unsafe {
if PIXEL_DATA_UPDATED == true {
gl.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_u8_array(
WebGlRenderingContext::TEXTURE_2D,
0,
0,
0,
WIDTH,
HEIGHT,
WebGlRenderingContext::RGBA,
WebGlRenderingContext::UNSIGNED_BYTE,
Some(&PIXEL_DATA),
)
.expect("should update GPU memory OK");
PIXEL_DATA_UPDATED = false;
}
}
gl.clear_color(0.0, 0.0, 0.0, 1.0);
gl.clear(WebGlRenderingContext::COLOR_BUFFER_BIT);
gl.draw_arrays(
WebGlRenderingContext::TRIANGLES,
0,
(VERTICES.len() / 3) as i32,
);
//update_texture_and_draw(gl, texture, texture_location);
request_animation_frame(f.borrow().as_ref().unwrap());
}) as Box<dyn FnMut()>));
}
request_animation_frame(g.borrow().as_ref().unwrap());
}
pub fn compile_shader(
context: &WebGlRenderingContext,
shader_type: u32,
source: & | .expect("should register `requestAnimationFrame` OK");
}
#[wasm_bindgen(start)] | random_line_split |
lib.rs | ] = [
-1.0, -1.0, 0.0, // Bottom left
1.0, -1.0, 0.0, // Bottom right
1.0, 1.0, 0.0, // Top right
-1.0, -1.0, 0.0, // Bottom left
1.0, 1.0, 0.0, // Top right
-1.0, 1.0, 0.0, // Top left
];
fn window() -> web_sys::Window {
web_sys::window().expect("no global `window` exists")
}
fn request_animation_frame(f: &Closure<dyn FnMut()>) {
window()
.request_animation_frame(f.as_ref().unchecked_ref())
.expect("should register `requestAnimationFrame` OK");
}
#[wasm_bindgen(start)]
pub fn start() {
utils::set_panic_hook();
log!("Hello there! Compositor canvas starting/loading");
}
#[wasm_bindgen]
pub fn initialise(element_id: String) -> Result<(), JsValue> {
log!(
"Compositor canvas (element_id: String = `{}`) initialisation",
&element_id
);
let document = web_sys::window().unwrap().document().unwrap();
let canvas = document.get_element_by_id(&element_id).unwrap();
let canvas: web_sys::HtmlCanvasElement = canvas.dyn_into::<web_sys::HtmlCanvasElement>()?;
let context = canvas
.get_context("webgl")?
.unwrap()
.dyn_into::<WebGlRenderingContext>()?;
let vert_shader = compile_shader(
&context,
WebGlRenderingContext::VERTEX_SHADER,
r#"
attribute vec4 position;
attribute vec2 textureCoord;
varying highp vec2 vTextureCoord;
void main(void) {
gl_Position = position;
vTextureCoord = textureCoord;
}
"#,
)?;
let frag_shader = compile_shader(
&context,
WebGlRenderingContext::FRAGMENT_SHADER,
r#"
varying highp vec2 vTextureCoord;
uniform sampler2D image;
void main(void) {
gl_FragColor = texture2D(image, vTextureCoord);
gl_FragColor = vec4(gl_FragColor.b, gl_FragColor.g, gl_FragColor.r, gl_FragColor.a);
}
"#,
)?;
let program = link_program(&context, &vert_shader, &frag_shader)?;
let position_location = context.get_attrib_location(&program, "position");
let texcoord_location = context.get_attrib_location(&program, "textureCoord");
let texture_location = context.get_uniform_location(&program, "image"); //.unwrap();
// Bind shader
context.use_program(Some(&program));
// Build model
let vertex_buffer = context
.create_buffer()
.ok_or("failed to create vertex buffer")?;
context.bind_buffer(WebGlRenderingContext::ARRAY_BUFFER, Some(&vertex_buffer));
// Note that `Float32Array::view` is somewhat dangerous (hence the
// `unsafe`!). This is creating a raw view into our module's
// `WebAssembly.Memory` buffer, but if we allocate more pages for ourself
// (aka do a memory allocation in Rust) it'll cause the buffer to change,
// causing the `Float32Array` to be invalid.
//
// As a result, after `Float32Array::view` we have to be very careful not to
// do any memory allocations before it's dropped.
unsafe {
let vert_array = js_sys::Float32Array::view(&VERTICES);
context.buffer_data_with_array_buffer_view(
WebGlRenderingContext::ARRAY_BUFFER,
&vert_array,
WebGlRenderingContext::STATIC_DRAW,
);
}
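// Describe the vertex layout: three tightly packed f32 components per position, no normalization, stride and offset of zero.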
context.vertex_attrib_pointer_with_i32(
position_location as u32,
3,
WebGlRenderingContext::FLOAT,
false,
0,
0,
);
context.enable_vertex_attrib_array(position_location as u32);
// Add uvs
let uvs: [f32; 12] = [
0.0, 1.0, // Bottom left
1.0, 1.0, // Bottom right
1.0, 0.0, // Top right
0.0, 1.0, // Bottom left
1.0, 0.0, // Top right
0.0, 0.0, // Top left
];
let uv_buffer = context
.create_buffer()
.ok_or("failed to create uv buffer")?;
context.bind_buffer(WebGlRenderingContext::ARRAY_BUFFER, Some(&uv_buffer));
// Note that `Float32Array::view` is somewhat dangerous (hence the
// `unsafe`!). This is creating a raw view into our module's
// `WebAssembly.Memory` buffer, but if we allocate more pages for ourself
// (aka do a memory allocation in Rust) it'll cause the buffer to change,
// causing the `Float32Array` to be invalid.
//
// As a result, after `Float32Array::view` we have to be very careful not to
// do any memory allocations before it's dropped.
unsafe {
let uv_array = js_sys::Float32Array::view(&uvs);
context.buffer_data_with_array_buffer_view(
WebGlRenderingContext::ARRAY_BUFFER,
&uv_array,
WebGlRenderingContext::STATIC_DRAW,
);
}
context.vertex_attrib_pointer_with_i32(
texcoord_location as u32,
2,
WebGlRenderingContext::FLOAT,
false,
0,
0,
);
context.enable_vertex_attrib_array(texcoord_location as u32);
// Create a texture
let texture = context.create_texture();
context.bind_texture(WebGlRenderingContext::TEXTURE_2D, texture.as_ref());
unsafe {
context
.tex_image_2d_with_i32_and_i32_and_i32_and_format_and_type_and_opt_u8_array(
//context.tex_image_2d_with_i32_and_i32_and_i32_and_format_and_type_and_opt_array_buffer_view(
WebGlRenderingContext::TEXTURE_2D,
0,
WebGlRenderingContext::RGBA as i32,
WIDTH,
HEIGHT,
0,
WebGlRenderingContext::RGBA,
WebGlRenderingContext::UNSIGNED_BYTE,
Some(&PIXEL_DATA),
)
.expect("should create GPU memory OK");
}
context.generate_mipmap(WebGlRenderingContext::TEXTURE_2D);
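// Sampling parameters: clamp texture coordinates at the edges and use linear filtering when magnifying.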
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_WRAP_S,
WebGlRenderingContext::CLAMP_TO_EDGE as i32,
);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_WRAP_T,
WebGlRenderingContext::CLAMP_TO_EDGE as i32,
);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_MAG_FILTER,
WebGlRenderingContext::LINEAR as i32,
);
context.uniform1i(Some(texture_location.unwrap().as_ref()), 0);
// draw()
context.clear_color(0.0, 0.0, 0.0, 1.0);
context.clear(WebGlRenderingContext::COLOR_BUFFER_BIT);
context.draw_arrays(
WebGlRenderingContext::TRIANGLES,
0,
(VERTICES.len() / 3) as i32,
);
input_data_update_loop(context, texture.unwrap());
// Fin
Ok(())
}
pub fn input_data_update_loop(gl: WebGlRenderingContext, texture: web_sys::WebGlTexture) {
let f = Rc::new(RefCell::new(None));
let g = f.clone();
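// Store the render closure in an Rc<RefCell<Option<...>>> so it can re-schedule itself via request_animation_frame each frame (the usual wasm-bindgen pattern).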
{
*g.borrow_mut() = Some(Closure::wrap(Box::new(move || {
gl.bind_texture(WebGlRenderingContext::TEXTURE_2D, Some(&texture));
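// Only re-upload the pixel buffer when the producer has flagged new data; tex_sub_image_2d overwrites the existing GPU allocation rather than creating a new one.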
unsafe {
if PIXEL_DATA_UPDATED {
gl.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_u8_array(
WebGlRenderingContext::TEXTURE_2D,
0,
0,
0,
WIDTH,
HEIGHT,
WebGlRenderingContext::RGBA,
WebGlRenderingContext::UNSIGNED_BYTE,
Some(&PIXEL_DATA),
)
.expect("should update GPU memory OK");
PIXEL_DATA_UPDATED = false;
}
}
gl.clear_color(0.0, 0.0, 0.0, 1.0);
gl.clear(WebGlRenderingContext::COLOR_BUFFER_BIT);
gl.draw_arrays(
WebGlRenderingContext::TRIANGLES,
0,
(VERTICES.len() / 3) as i32,
);
//update_texture_and_draw(gl, texture, texture_location);
request_animation_frame(f.borrow().as_ref().unwrap());
}) as Box<dyn FnMut()>));
}
request_animation_frame(g.borrow().as_ref().unwrap());
}
pub fn | (
context: &WebGlRenderingContext,
shader_type: u32,
source: & | compile_shader | identifier_name |
lib.rs | ] = [
-1.0, -1.0, 0.0, // Bottom left
1.0, -1.0, 0.0, // Bottom right
1.0, 1.0, 0.0, // Top right
-1.0, -1.0, 0.0, // Bottom left
1.0, 1.0, 0.0, // Top right
-1.0, 1.0, 0.0, // Top left
];
fn window() -> web_sys::Window {
web_sys::window().expect("no global `window` exists")
}
fn request_animation_frame(f: &Closure<dyn FnMut()>) |
#[wasm_bindgen(start)]
pub fn start() {
utils::set_panic_hook();
log!("Hello there! Compositor canvas starting/loading");
}
#[wasm_bindgen]
pub fn initialise(element_id: String) -> Result<(), JsValue> {
log!(
"Compositor canvas (element_id: String = `{}`) initialisation",
&element_id
);
let document = web_sys::window().unwrap().document().unwrap();
let canvas = document.get_element_by_id(&element_id).unwrap();
let canvas: web_sys::HtmlCanvasElement = canvas.dyn_into::<web_sys::HtmlCanvasElement>()?;
let context = canvas
.get_context("webgl")?
.unwrap()
.dyn_into::<WebGlRenderingContext>()?;
let vert_shader = compile_shader(
&context,
WebGlRenderingContext::VERTEX_SHADER,
r#"
attribute vec4 position;
attribute vec2 textureCoord;
varying highp vec2 vTextureCoord;
void main(void) {
gl_Position = position;
vTextureCoord = textureCoord;
}
"#,
)?;
let frag_shader = compile_shader(
&context,
WebGlRenderingContext::FRAGMENT_SHADER,
r#"
varying highp vec2 vTextureCoord;
uniform sampler2D image;
void main(void) {
gl_FragColor = texture2D(image, vTextureCoord);
gl_FragColor = vec4(gl_FragColor.b, gl_FragColor.g, gl_FragColor.r, gl_FragColor.a);
}
"#,
)?;
let program = link_program(&context, &vert_shader, &frag_shader)?;
let position_location = context.get_attrib_location(&program, "position");
let texcoord_location = context.get_attrib_location(&program, "textureCoord");
let texture_location = context.get_uniform_location(&program, "image"); //.unwrap();
// Bind shader
context.use_program(Some(&program));
// Build model
let vertex_buffer = context
.create_buffer()
.ok_or("failed to create vertex buffer")?;
context.bind_buffer(WebGlRenderingContext::ARRAY_BUFFER, Some(&vertex_buffer));
// Note that `Float32Array::view` is somewhat dangerous (hence the
// `unsafe`!). This is creating a raw view into our module's
// `WebAssembly.Memory` buffer, but if we allocate more pages for ourself
// (aka do a memory allocation in Rust) it'll cause the buffer to change,
// causing the `Float32Array` to be invalid.
//
// As a result, after `Float32Array::view` we have to be very careful not to
// do any memory allocations before it's dropped.
unsafe {
let vert_array = js_sys::Float32Array::view(&VERTICES);
context.buffer_data_with_array_buffer_view(
WebGlRenderingContext::ARRAY_BUFFER,
&vert_array,
WebGlRenderingContext::STATIC_DRAW,
);
}
context.vertex_attrib_pointer_with_i32(
position_location as u32,
3,
WebGlRenderingContext::FLOAT,
false,
0,
0,
);
context.enable_vertex_attrib_array(position_location as u32);
// Add uvs
let uvs: [f32; 12] = [
0.0, 1.0, // Bottom left
1.0, 1.0, // Bottom right
1.0, 0.0, // Top right
0.0, 1.0, // Bottom left
1.0, 0.0, // Top right
0.0, 0.0, // Top left
];
let uv_buffer = context
.create_buffer()
.ok_or("failed to create uv buffer")?;
context.bind_buffer(WebGlRenderingContext::ARRAY_BUFFER, Some(&uv_buffer));
// Note that `Float32Array::view` is somewhat dangerous (hence the
// `unsafe`!). This is creating a raw view into our module's
// `WebAssembly.Memory` buffer, but if we allocate more pages for ourself
// (aka do a memory allocation in Rust) it'll cause the buffer to change,
// causing the `Float32Array` to be invalid.
//
// As a result, after `Float32Array::view` we have to be very careful not to
// do any memory allocations before it's dropped.
unsafe {
let uv_array = js_sys::Float32Array::view(&uvs);
context.buffer_data_with_array_buffer_view(
WebGlRenderingContext::ARRAY_BUFFER,
&uv_array,
WebGlRenderingContext::STATIC_DRAW,
);
}
context.vertex_attrib_pointer_with_i32(
texcoord_location as u32,
2,
WebGlRenderingContext::FLOAT,
false,
0,
0,
);
context.enable_vertex_attrib_array(texcoord_location as u32);
// Create a texture
let texture = context.create_texture();
context.bind_texture(WebGlRenderingContext::TEXTURE_2D, texture.as_ref());
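// Allocate GPU storage for a WIDTH x HEIGHT RGBA texture and upload the initial contents of PIXEL_DATA.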
unsafe {
context
.tex_image_2d_with_i32_and_i32_and_i32_and_format_and_type_and_opt_u8_array(
//context.tex_image_2d_with_i32_and_i32_and_i32_and_format_and_type_and_opt_array_buffer_view(
WebGlRenderingContext::TEXTURE_2D,
0,
WebGlRenderingContext::RGBA as i32,
WIDTH,
HEIGHT,
0,
WebGlRenderingContext::RGBA,
WebGlRenderingContext::UNSIGNED_BYTE,
Some(&PIXEL_DATA),
)
.expect("should create GPU memory OK");
}
context.generate_mipmap(WebGlRenderingContext::TEXTURE_2D);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_WRAP_S,
WebGlRenderingContext::CLAMP_TO_EDGE as i32,
);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_WRAP_T,
WebGlRenderingContext::CLAMP_TO_EDGE as i32,
);
context.tex_parameteri(
WebGlRenderingContext::TEXTURE_2D,
WebGlRenderingContext::TEXTURE_MAG_FILTER,
WebGlRenderingContext::LINEAR as i32,
);
context.uniform1i(Some(texture_location.unwrap().as_ref()), 0);
// draw()
context.clear_color(0.0, 0.0, 0.0, 1.0);
context.clear(WebGlRenderingContext::COLOR_BUFFER_BIT);
context.draw_arrays(
WebGlRenderingContext::TRIANGLES,
0,
(VERTICES.len() / 3) as i32,
);
input_data_update_loop(context, texture.unwrap());
// Fin
Ok(())
}
pub fn input_data_update_loop(gl: WebGlRenderingContext, texture: web_sys::WebGlTexture) {
let f = Rc::new(RefCell::new(None));
let g = f.clone();
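// The render closure is kept in an Rc<RefCell<Option<...>>> so it can hand a reference to itself to request_animation_frame and keep the loop going.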
{
*g.borrow_mut() = Some(Closure::wrap(Box::new(move || {
gl.bind_texture(WebGlRenderingContext::TEXTURE_2D, Some(&texture));
unsafe {
if PIXEL_DATA_UPDATED {
gl.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_u8_array(
WebGlRenderingContext::TEXTURE_2D,
0,
0,
0,
WIDTH,
HEIGHT,
WebGlRenderingContext::RGBA,
WebGlRenderingContext::UNSIGNED_BYTE,
Some(&PIXEL_DATA),
)
.expect("should update GPU memory OK");
PIXEL_DATA_UPDATED = false;
}
}
gl.clear_color(0.0, 0.0, 0.0, 1.0);
gl.clear(WebGlRenderingContext::COLOR_BUFFER_BIT);
gl.draw_arrays(
WebGlRenderingContext::TRIANGLES,
0,
(VERTICES.len() / 3) as i32,
);
//update_texture_and_draw(gl, texture, texture_location);
request_animation_frame(f.borrow().as_ref().unwrap());
}) as Box<dyn FnMut()>));
}
request_animation_frame(g.borrow().as_ref().unwrap());
}
pub fn compile_shader(
context: &WebGlRenderingContext,
shader_type: u32,
source: | {
window()
.request_animation_frame(f.as_ref().unchecked_ref())
.expect("should register `requestAnimationFrame` OK");
} | identifier_body |
async_stream_cdc.rs | )]
/// # use tokio_stream::StreamExt;
///
/// async fn run() {
/// let source = std::fs::read("test/fixtures/SekienAkashita.jpg").unwrap();
/// let mut chunker = AsyncStreamCDC::new(source.as_ref(), 4096, 16384, 65535);
/// let stream = chunker.as_stream();
///
/// let chunks = stream.collect::<Vec<_>>().await;
///
/// for result in chunks {
/// let chunk = result.unwrap();
/// println!("offset={} length={}", chunk.offset, chunk.length);
/// }
/// }
/// ```
///
pub struct AsyncStreamCDC<R> {
/// Buffer of data from source for finding cut points.
buffer: Vec<u8>,
/// Maximum capacity of the buffer (always `max_size`).
capacity: usize,
/// Number of relevant bytes in the `buffer`.
length: usize,
/// Source from which data is read into `buffer`.
source: R,
/// Number of bytes read from the source so far.
processed: u64,
/// True when the source produces no more data.
eof: bool,
min_size: usize,
avg_size: usize,
max_size: usize,
mask_s: u64,
mask_l: u64,
mask_s_ls: u64,
mask_l_ls: u64,
}
impl<R: AsyncRead + Unpin> AsyncStreamCDC<R> {
///
/// Construct an `AsyncStreamCDC` that will process bytes from the given source.
///
/// Uses chunk size normalization level 1 by default.
///
pub fn new(source: R, min_size: u32, avg_size: u32, max_size: u32) -> Self {
Self::with_level(source, min_size, avg_size, max_size, Normalization::Level1)
}
///
/// Create a new `AsyncStreamCDC` with the given normalization level.
///
pub fn with_level(
source: R,
min_size: u32,
avg_size: u32,
max_size: u32,
level: Normalization,
) -> Self {
assert!(min_size >= MINIMUM_MIN);
assert!(min_size <= MINIMUM_MAX);
assert!(avg_size >= AVERAGE_MIN);
assert!(avg_size <= AVERAGE_MAX);
assert!(max_size >= MAXIMUM_MIN);
assert!(max_size <= MAXIMUM_MAX);
let bits = logarithm2(avg_size);
let normalization = level.bits();
let mask_s = MASKS[(bits + normalization) as usize];
let mask_l = MASKS[(bits - normalization) as usize];
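// Normalized chunking: mask_s has more one-bits and is used while the chunk is still below the average size (cuts are less likely), mask_l has fewer one-bits and is used afterwards (cuts are more likely), pulling chunk sizes toward avg_size.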
Self {
buffer: vec![0_u8; max_size as usize],
capacity: max_size as usize,
length: 0,
source,
eof: false,
processed: 0,
min_size: min_size as usize,
avg_size: avg_size as usize,
max_size: max_size as usize,
mask_s,
mask_l,
mask_s_ls: mask_s << 1,
mask_l_ls: mask_l << 1,
}
}
/// Fill the buffer with data from the source, returning the number of bytes
/// read (zero if end of source has been reached).
async fn fill_buffer(&mut self) -> Result<usize, Error> {
// this code originally copied from asuran crate
if self.eof {
Ok(0)
} else {
let mut all_bytes_read = 0;
while !self.eof && self.length < self.capacity {
let bytes_read = self.source.read(&mut self.buffer[self.length..]).await?;
if bytes_read == 0 {
self.eof = true;
} else {
self.length += bytes_read;
all_bytes_read += bytes_read;
}
}
Ok(all_bytes_read)
}
}
/// Drains a specified number of bytes from the buffer, then resizes the
/// buffer back to `capacity` size in preparation for further reads.
fn drain_bytes(&mut self, count: usize) -> Result<Vec<u8>, Error> {
// this code originally copied from asuran crate
if count > self.length {
Err(Error::Other(format!(
"drain_bytes() called with count larger than length: {} > {}",
count, self.length
)))
} else {
let data = self.buffer.drain(..count).collect::<Vec<u8>>();
self.length -= count;
self.buffer.resize(self.capacity, 0_u8);
Ok(data)
}
}
/// Find the next chunk in the source. If the end of the source has been
/// reached, returns `Error::Empty` as the error.
async fn read_chunk(&mut self) -> Result<ChunkData, Error> {
self.fill_buffer().await?;
if self.length == 0 {
Err(Error::Empty)
} else {
let (hash, count) = cut(
&self.buffer[..self.length],
self.min_size,
self.avg_size,
self.max_size,
self.mask_s,
self.mask_l,
self.mask_s_ls,
self.mask_l_ls,
);
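// cut() returns the rolling-hash value and the byte length of the next chunk found in the buffered window.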
if count == 0 {
Err(Error::Empty)
} else {
let offset = self.processed;
self.processed += count as u64;
let data = self.drain_bytes(count)?;
Ok(ChunkData {
hash,
offset,
length: count,
data,
})
}
}
}
#[cfg(all(feature = "tokio", not(feature = "futures")))]
pub fn as_stream(&mut self) -> impl Stream<Item = Result<ChunkData, Error>> + '_ {
try_stream! {
loop {
match self.read_chunk().await {
Ok(chunk) => yield chunk,
Err(Error::Empty) => {
break;
}
error @ Err(_) => {
error?;
}
}
}
}
}
#[cfg(all(feature = "futures", not(feature = "tokio")))]
pub fn as_stream(&mut self) -> impl Stream<Item = Result<ChunkData, Error>> + '_ {
futures::stream::unfold(self, |this| async {
let chunk = this.read_chunk().await;
if let Err(Error::Empty) = chunk {
None
} else {
Some((chunk, this))
}
})
}
}
#[cfg(test)]
mod tests {
use crate::v2020::MASKS;
use super::AsyncStreamCDC;
#[test]
#[should_panic]
fn test_minimum_too_low() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 63, 256, 1024);
}
#[test]
#[should_panic]
fn test_minimum_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 67_108_867, 256, 1024);
}
#[test]
#[should_panic]
fn | () {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 255, 1024);
}
#[test]
#[should_panic]
fn test_average_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 268_435_457, 1024);
}
#[test]
#[should_panic]
fn test_maximum_too_low() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 256, 1023);
}
#[test]
#[should_panic]
fn test_maximum_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 256, 1_073_741_825);
}
#[test]
fn test_masks() {
let source = [0u8; 1024];
let chunker = AsyncStreamCDC::new(source.as_slice(), 64, 256, 1024);
assert_eq!(chunker.mask_l, MASKS[7]);
assert_eq!(chunker.mask_s, MASKS[9]);
let chunker = AsyncStreamCDC::new(source.as_slice(), 8192, 16384, 32768);
assert_eq!(chunker.mask_l, MASKS[13]);
assert_eq!(chunker.mask_s, MASKS[15]);
let chunker = AsyncStreamCDC::new(source.as_slice(), 1_048_576, 4_194_304, 16_777_216);
assert_eq!(chunker.mask_l, MASKS[21]);
assert_eq!(chunker.mask_s, MASKS[23 | test_average_too_low | identifier_name |
async_stream_cdc.rs | )]
/// # use tokio_stream::StreamExt;
///
/// async fn run() {
/// let source = std::fs::read("test/fixtures/SekienAkashita.jpg").unwrap();
/// let mut chunker = AsyncStreamCDC::new(source.as_ref(), 4096, 16384, 65535);
/// let stream = chunker.as_stream();
///
/// let chunks = stream.collect::<Vec<_>>().await;
///
/// for result in chunks {
/// let chunk = result.unwrap();
/// println!("offset={} length={}", chunk.offset, chunk.length);
/// }
/// }
/// ```
///
pub struct AsyncStreamCDC<R> {
/// Buffer of data from source for finding cut points.
buffer: Vec<u8>,
/// Maximum capacity of the buffer (always `max_size`).
capacity: usize,
/// Number of relevant bytes in the `buffer`.
length: usize,
/// Source from which data is read into `buffer`.
source: R,
/// Number of bytes read from the source so far.
processed: u64,
/// True when the source produces no more data.
eof: bool,
min_size: usize,
avg_size: usize,
max_size: usize,
mask_s: u64,
mask_l: u64,
mask_s_ls: u64,
mask_l_ls: u64,
}
impl<R: AsyncRead + Unpin> AsyncStreamCDC<R> {
///
/// Construct an `AsyncStreamCDC` that will process bytes from the given source.
///
/// Uses chunk size normalization level 1 by default.
///
pub fn new(source: R, min_size: u32, avg_size: u32, max_size: u32) -> Self {
Self::with_level(source, min_size, avg_size, max_size, Normalization::Level1)
}
///
/// Create a new `AsyncStreamCDC` with the given normalization level.
///
pub fn with_level(
source: R,
min_size: u32,
avg_size: u32,
max_size: u32,
level: Normalization,
) -> Self {
assert!(min_size >= MINIMUM_MIN);
assert!(min_size <= MINIMUM_MAX);
assert!(avg_size >= AVERAGE_MIN);
assert!(avg_size <= AVERAGE_MAX);
assert!(max_size >= MAXIMUM_MIN);
assert!(max_size <= MAXIMUM_MAX);
let bits = logarithm2(avg_size);
let normalization = level.bits();
let mask_s = MASKS[(bits + normalization) as usize];
let mask_l = MASKS[(bits - normalization) as usize];
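// The two masks implement normalized chunking: the stricter mask applies below the average size and the looser one above it, biasing chunk lengths toward avg_size.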
Self {
buffer: vec![0_u8; max_size as usize],
capacity: max_size as usize,
length: 0,
source,
eof: false,
processed: 0,
min_size: min_size as usize,
avg_size: avg_size as usize,
max_size: max_size as usize,
mask_s,
mask_l,
mask_s_ls: mask_s << 1,
mask_l_ls: mask_l << 1,
}
}
/// Fill the buffer with data from the source, returning the number of bytes
/// read (zero if end of source has been reached).
async fn fill_buffer(&mut self) -> Result<usize, Error> {
// this code originally copied from asuran crate
if self.eof {
Ok(0)
} else {
let mut all_bytes_read = 0;
while !self.eof && self.length < self.capacity {
let bytes_read = self.source.read(&mut self.buffer[self.length..]).await?;
if bytes_read == 0 {
self.eof = true;
} else |
}
Ok(all_bytes_read)
}
}
/// Drains a specified number of bytes from the buffer, then resizes the
/// buffer back to `capacity` size in preparation for further reads.
fn drain_bytes(&mut self, count: usize) -> Result<Vec<u8>, Error> {
// this code originally copied from asuran crate
if count > self.length {
Err(Error::Other(format!(
"drain_bytes() called with count larger than length: {} > {}",
count, self.length
)))
} else {
let data = self.buffer.drain(..count).collect::<Vec<u8>>();
self.length -= count;
self.buffer.resize(self.capacity, 0_u8);
Ok(data)
}
}
/// Find the next chunk in the source. If the end of the source has been
/// reached, returns `Error::Empty` as the error.
async fn read_chunk(&mut self) -> Result<ChunkData, Error> {
self.fill_buffer().await?;
if self.length == 0 {
Err(Error::Empty)
} else {
let (hash, count) = cut(
&self.buffer[..self.length],
self.min_size,
self.avg_size,
self.max_size,
self.mask_s,
self.mask_l,
self.mask_s_ls,
self.mask_l_ls,
);
if count == 0 {
Err(Error::Empty)
} else {
let offset = self.processed;
self.processed += count as u64;
let data = self.drain_bytes(count)?;
Ok(ChunkData {
hash,
offset,
length: count,
data,
})
}
}
}
#[cfg(all(feature = "tokio", not(feature = "futures")))]
pub fn as_stream(&mut self) -> impl Stream<Item = Result<ChunkData, Error>> + '_ {
try_stream! {
loop {
match self.read_chunk().await {
Ok(chunk) => yield chunk,
Err(Error::Empty) => {
break;
}
error @ Err(_) => {
error?;
}
}
}
}
}
#[cfg(all(feature = "futures", not(feature = "tokio")))]
pub fn as_stream(&mut self) -> impl Stream<Item = Result<ChunkData, Error>> + '_ {
futures::stream::unfold(self, |this| async {
let chunk = this.read_chunk().await;
if let Err(Error::Empty) = chunk {
None
} else {
Some((chunk, this))
}
})
}
}
#[cfg(test)]
mod tests {
use crate::v2020::MASKS;
use super::AsyncStreamCDC;
#[test]
#[should_panic]
fn test_minimum_too_low() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 63, 256, 1024);
}
#[test]
#[should_panic]
fn test_minimum_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 67_108_867, 256, 1024);
}
#[test]
#[should_panic]
fn test_average_too_low() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 255, 1024);
}
#[test]
#[should_panic]
fn test_average_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 268_435_457, 1024);
}
#[test]
#[should_panic]
fn test_maximum_too_low() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 256, 1023);
}
#[test]
#[should_panic]
fn test_maximum_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 256, 1_073_741_825);
}
#[test]
fn test_masks() {
let source = [0u8; 1024];
let chunker = AsyncStreamCDC::new(source.as_slice(), 64, 256, 1024);
assert_eq!(chunker.mask_l, MASKS[7]);
assert_eq!(chunker.mask_s, MASKS[9]);
let chunker = AsyncStreamCDC::new(source.as_slice(), 8192, 16384, 32768);
assert_eq!(chunker.mask_l, MASKS[13]);
assert_eq!(chunker.mask_s, MASKS[15]);
let chunker = AsyncStreamCDC::new(source.as_slice(), 1_048_576, 4_194_304, 16_777_216);
assert_eq!(chunker.mask_l, MASKS[21]);
assert_eq!(chunker.mask_s, MASKS[2 | {
self.length += bytes_read;
all_bytes_read += bytes_read;
} | conditional_block |
async_stream_cdc.rs | )]
/// # use tokio_stream::StreamExt;
///
/// async fn run() {
/// let source = std::fs::read("test/fixtures/SekienAkashita.jpg").unwrap();
/// let mut chunker = AsyncStreamCDC::new(source.as_ref(), 4096, 16384, 65535);
/// let stream = chunker.as_stream();
///
/// let chunks = stream.collect::<Vec<_>>().await;
///
/// for result in chunks {
/// let chunk = result.unwrap();
/// println!("offset={} length={}", chunk.offset, chunk.length);
/// }
/// }
/// ```
///
pub struct AsyncStreamCDC<R> {
/// Buffer of data from source for finding cut points.
buffer: Vec<u8>,
/// Maximum capacity of the buffer (always `max_size`).
capacity: usize,
/// Number of relevant bytes in the `buffer`.
length: usize,
/// Source from which data is read into `buffer`.
source: R,
/// Number of bytes read from the source so far.
processed: u64, | max_size: usize,
mask_s: u64,
mask_l: u64,
mask_s_ls: u64,
mask_l_ls: u64,
}
impl<R: AsyncRead + Unpin> AsyncStreamCDC<R> {
///
/// Construct an `AsyncStreamCDC` that will process bytes from the given source.
///
/// Uses chunk size normalization level 1 by default.
///
pub fn new(source: R, min_size: u32, avg_size: u32, max_size: u32) -> Self {
Self::with_level(source, min_size, avg_size, max_size, Normalization::Level1)
}
///
/// Create a new `AsyncStreamCDC` with the given normalization level.
///
pub fn with_level(
source: R,
min_size: u32,
avg_size: u32,
max_size: u32,
level: Normalization,
) -> Self {
assert!(min_size >= MINIMUM_MIN);
assert!(min_size <= MINIMUM_MAX);
assert!(avg_size >= AVERAGE_MIN);
assert!(avg_size <= AVERAGE_MAX);
assert!(max_size >= MAXIMUM_MIN);
assert!(max_size <= MAXIMUM_MAX);
let bits = logarithm2(avg_size);
let normalization = level.bits();
let mask_s = MASKS[(bits + normalization) as usize];
let mask_l = MASKS[(bits - normalization) as usize];
Self {
buffer: vec![0_u8; max_size as usize],
capacity: max_size as usize,
length: 0,
source,
eof: false,
processed: 0,
min_size: min_size as usize,
avg_size: avg_size as usize,
max_size: max_size as usize,
mask_s,
mask_l,
mask_s_ls: mask_s << 1,
mask_l_ls: mask_l << 1,
}
}
/// Fill the buffer with data from the source, returning the number of bytes
/// read (zero if end of source has been reached).
async fn fill_buffer(&mut self) -> Result<usize, Error> {
// this code originally copied from asuran crate
if self.eof {
Ok(0)
} else {
let mut all_bytes_read = 0;
while !self.eof && self.length < self.capacity {
let bytes_read = self.source.read(&mut self.buffer[self.length..]).await?;
if bytes_read == 0 {
self.eof = true;
} else {
self.length += bytes_read;
all_bytes_read += bytes_read;
}
}
Ok(all_bytes_read)
}
}
/// Drains a specified number of bytes from the buffer, then resizes the
/// buffer back to `capacity` size in preparation for further reads.
fn drain_bytes(&mut self, count: usize) -> Result<Vec<u8>, Error> {
// this code originally copied from asuran crate
if count > self.length {
Err(Error::Other(format!(
"drain_bytes() called with count larger than length: {} > {}",
count, self.length
)))
} else {
let data = self.buffer.drain(..count).collect::<Vec<u8>>();
self.length -= count;
self.buffer.resize(self.capacity, 0_u8);
Ok(data)
}
}
/// Find the next chunk in the source. If the end of the source has been
/// reached, returns `Error::Empty` as the error.
async fn read_chunk(&mut self) -> Result<ChunkData, Error> {
self.fill_buffer().await?;
if self.length == 0 {
Err(Error::Empty)
} else {
let (hash, count) = cut(
&self.buffer[..self.length],
self.min_size,
self.avg_size,
self.max_size,
self.mask_s,
self.mask_l,
self.mask_s_ls,
self.mask_l_ls,
);
if count == 0 {
Err(Error::Empty)
} else {
let offset = self.processed;
self.processed += count as u64;
let data = self.drain_bytes(count)?;
Ok(ChunkData {
hash,
offset,
length: count,
data,
})
}
}
}
#[cfg(all(feature = "tokio", not(feature = "futures")))]
pub fn as_stream(&mut self) -> impl Stream<Item = Result<ChunkData, Error>> + '_ {
try_stream! {
loop {
match self.read_chunk().await {
Ok(chunk) => yield chunk,
Err(Error::Empty) => {
break;
}
error @ Err(_) => {
error?;
}
}
}
}
}
#[cfg(all(feature = "futures", not(feature = "tokio")))]
pub fn as_stream(&mut self) -> impl Stream<Item = Result<ChunkData, Error>> + '_ {
futures::stream::unfold(self, |this| async {
let chunk = this.read_chunk().await;
if let Err(Error::Empty) = chunk {
None
} else {
Some((chunk, this))
}
})
}
}
#[cfg(test)]
mod tests {
use crate::v2020::MASKS;
use super::AsyncStreamCDC;
#[test]
#[should_panic]
fn test_minimum_too_low() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 63, 256, 1024);
}
#[test]
#[should_panic]
fn test_minimum_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 67_108_867, 256, 1024);
}
#[test]
#[should_panic]
fn test_average_too_low() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 255, 1024);
}
#[test]
#[should_panic]
fn test_average_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 268_435_457, 1024);
}
#[test]
#[should_panic]
fn test_maximum_too_low() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 256, 1023);
}
#[test]
#[should_panic]
fn test_maximum_too_high() {
let array = [0u8; 1024];
AsyncStreamCDC::new(array.as_slice(), 64, 256, 1_073_741_825);
}
#[test]
fn test_masks() {
let source = [0u8; 1024];
let chunker = AsyncStreamCDC::new(source.as_slice(), 64, 256, 1024);
assert_eq!(chunker.mask_l, MASKS[7]);
assert_eq!(chunker.mask_s, MASKS[9]);
let chunker = AsyncStreamCDC::new(source.as_slice(), 8192, 16384, 32768);
assert_eq!(chunker.mask_l, MASKS[13]);
assert_eq!(chunker.mask_s, MASKS[15]);
let chunker = AsyncStreamCDC::new(source.as_slice(), 1_048_576, 4_194_304, 16_777_216);
assert_eq!(chunker.mask_l, MASKS[21]);
assert_eq!(chunker.mask_s, MASKS[23]);
| /// True when the source produces no more data.
eof: bool,
min_size: usize,
avg_size: usize, | random_line_split |
run_mlm_my.py | ,
set_seed,
)
from transformers.trainer_utils import is_main_process
from mydatasets import BERTPretrainedPairWiseDataset, PairCollator
from mymodels import PairWiseBertForPreTraining
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class | :
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization."
"Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[int] = field(
default=5,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated."
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
dataset_cache_dir: str = field(
default=False,
metadata={"help": "Directory used to cache the processed pretraining dataset."},
)
dataset_script_dir: str = field(
default=False,
metadata={"help": "Directory containing the dataset loading script used to build the pretraining data."},
)
limit: Optional[int] = field(
default=50000000,
metadata={"help": "Maximum number of examples to load from the dataset."},
)
def __post_init__(self):
print("no need assert")
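# Illustrative invocation (paths and values are placeholders, adjust to your setup):
#   python run_mlm_my.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file data/pairs.txt \
#       --dataset_script_dir ./dataset_scripts \
#       --dataset_cache_dir ./dataset_cache \
#       --max_seq_length 128 \
#       --do_train \
#       --output_dir ./output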
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty."
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f", distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# data_files = {}
# if data_args.train_file is not None:
# data_files["train"] = data_args.train_file
# if data_args.validation_file is not None:
# data_files["validation"] = data_args.validation_file
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = PairWiseBertForPreTraining.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logger.info("Training new model from scratch")
model = PairWiseBertForPreTraining.from_config(config)
model.resize_token_embeddings(len(tokenizer))
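# Keep the embedding matrix in sync with the tokenizer's vocabulary (matters if special tokens were added).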
# Get datasets
print("start getting dataset....................")
if training_args.do_train:
print("start getting training dataset........")
train_dataset = BERTPretrainedPairWiseDataset(
data_args, tokenizer, data_args.dataset_cache_dir, data_args.dataset_script_dir
)
else:
train_dataset = None
print('getting dataset success................')
data_collator = PairCollator(tokenizer=tokenizer)
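# PairCollator is the project's custom collator; it batches and pads the pairwise pretraining examples fed to PairWiseBertForPreTraining.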
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
tokenizer=tokenizer,
data_collator=data_collator,
)
if training_args.do_train:
model_path = (
model_args.model_name_or_path
if (model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path))
else None
)
train_result = trainer.train(model_path=model_path)
trainer.save_model() # Saves the tokenizer | ModelArguments | identifier_name |
run_mlm_my.py | ,
set_seed,
)
from transformers.trainer_utils import is_main_process
from mydatasets import BERTPretrainedPairWiseDataset, PairCollator
from mymodels import PairWiseBertForPreTraining
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization."
"Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[int] = field(
default=5,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated."
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
dataset_cache_dir: str = field(
default=False,
metadata={"help": "Directory used to cache the processed pretraining dataset."},
)
dataset_script_dir: str = field(
default=False,
metadata={"help": "Directory containing the dataset loading script used to build the pretraining data."},
)
limit: Optional[int] = field(
default=50000000,
metadata={"help": "Maximum number of examples to load from the dataset."},
)
def __post_init__(self):
print("no need assert")
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty."
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f", distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# data_files = {}
# if data_args.train_file is not None:
# data_files["train"] = data_args.train_file
# if data_args.validation_file is not None:
# data_files["validation"] = data_args.validation_file
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = PairWiseBertForPreTraining.from_pretrained( | revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logger.info("Training new model from scratch")
model = PairWiseBertForPreTraining.from_config(config)
model.resize_token_embeddings(len(tokenizer))
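# Resize the embedding matrix to match the tokenizer vocabulary size.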
# Get datasets
print("start getting dataset....................")
if training_args.do_train:
print("start getting training dataset........")
train_dataset = BERTPretrainedPairWiseDataset(
data_args, tokenizer, data_args.dataset_cache_dir, data_args.dataset_script_dir
)
else:
train_dataset = None
print('getting dataset success................')
data_collator = PairCollator(tokenizer=tokenizer)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
tokenizer=tokenizer,
data_collator=data_collator,
)
if training_args.do_train:
model_path = (
model_args.model_name_or_path
if (model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path))
else None
)
train_result = trainer.train(model_path=model_path)
trainer.save_model() # Saves the tokenizer too | model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir, | random_line_split |
run_mlm_my.py | ,
set_seed,
)
from transformers.trainer_utils import is_main_process
from mydatasets import BERTPretrainedPairWiseDataset, PairCollator
from mymodels import PairWiseBertForPreTraining
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization."
"Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[int] = field(
default=5,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated."
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
dataset_cache_dir: str = field(
default=False,
metadata={"help": "Directory used to cache the processed pretraining dataset."},
)
dataset_script_dir: str = field(
default=False,
metadata={"help": "Directory containing the dataset loading script used to build the pretraining data."},
)
limit: Optional[int] = field(
default=50000000,
metadata={"help": "Maximum number of examples to load from the dataset."},
)
def __post_init__(self):
print("no need assert")
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty."
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f", distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# data_files = {}
# if data_args.train_file is not None:
# data_files["train"] = data_args.train_file
# if data_args.validation_file is not None:
# data_files["validation"] = data_args.validation_file
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
|
if model_args.model_name_or_path:
model = PairWiseBertForPreTraining.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logger.info("Training new model from scratch")
model = PairWiseBertForPreTraining.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Get datasets
print("start getting dataset....................")
if training_args.do_train:
print("start getting training dataset........")
train_dataset = BERTPretrainedPairWiseDataset(
data_args, tokenizer, data_args.dataset_cache_dir, data_args.dataset_script_dir
)
else:
train_dataset = None
print('getting dataset success................')
data_collator = PairCollator(tokenizer=tokenizer)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
tokenizer=tokenizer,
data_collator=data_collator,
)
if training_args.do_train:
model_path = (
model_args.model_name_or_path
if (model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path))
else None
)
train_result = trainer.train(model_path=model_path)
trainer.save_model() # Saves the tokenizer | raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
) | conditional_block |
run_mlm_my.py | ,
set_seed,
)
from transformers.trainer_utils import is_main_process
from mydatasets import BERTPretrainedPairWiseDataset, PairCollator
from mymodels import PairWiseBertForPreTraining
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
| )
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[int] = field(
default=5,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated."
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
dataset_cache_dir: str = field(
default=False,
metadata={"help": "Directory used to cache the processed pretraining dataset."},
)
dataset_script_dir: str = field(
default=False,
metadata={"help": "Directory containing the dataset loading script used to build the pretraining data."},
)
limit: Optional[int] = field(
default=50000000,
metadata={"help": "Maximum number of examples to load from the dataset."},
)
def __post_init__(self):
print("no need assert")
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty."
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# data_files = {}
# if data_args.train_file is not None:
# data_files["train"] = data_args.train_file
# if data_args.validation_file is not None:
# data_files["validation"] = data_args.validation_file
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = PairWiseBertForPreTraining.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logger.info("Training new model from scratch")
model = PairWiseBertForPreTraining.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Get datasets
print("start getting dataset....................")
if training_args.do_train:
print("start getting training dataset........")
train_dataset = BERTPretrainedPairWiseDataset(
data_args, tokenizer, data_args.dataset_cache_dir, data_args.dataset_script_dir
)
else:
train_dataset = None
print('getting dataset success................')
data_collator = PairCollator(tokenizer=tokenizer)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
tokenizer=tokenizer,
data_collator=data_collator,
)
if training_args.do_train:
model_path = (
model_args.model_name_or_path
if (model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path))
else None
)
train_result = trainer.train(model_path=model_path)
trainer.save_model() # Saves the tokenizer | """
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization."
"Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} | identifier_body |
titanic_keras_exp.py | except Exception as e:
raise e
# data exploration
print("shape: ", df.shape)
# statistical summary
description = df.describe()
print("description - no encoding:\n", description)
print()
plt.style.use('ggplot')
# input("Enter key to continue... \n")
# Feature-Feature Relationships
# scatter_matrix(df)
print()
# too many missing values in 'Cabin' columns: about 3/4
print("Dropping 'Cabin' column -- too many missing values")
# df.Cabin.replace(to_replace=np.nan, value='Unknown', inplace=True)
df.drop(['Cabin'], axis=1, inplace=True)
print()
print("Now, shape: ", df.shape)
print("df.head():\n", df.head())
print()
description = df.describe()
print("Once again, description - no encoding:\n", description)
print()
# input("Enter key to continue... \n")
target = 'Survived'
# feature preprocessing
sltt = eu.scoring_and_tt_split(df, target, 0.2, seed)
X_train, X_test, y_train, y_test = sltt['arrays']
scoring = sltt['scoring']
Y_type = sltt['target_type']
labels = sltt['labels']
print("scoring:", scoring)
print()
print("Classes:", labels)
print()
print("X_train shape: ", X_train.shape)
print("X_train -- first row:", X_train.values[0])
print("y_train shape: ", y_train.shape)
print()
print("X_test shape: ", X_test.shape)
print("X_test -- first row:", X_test.values[0])
print("y_test shape: ", y_test.shape)
print()
print("y_train:", y_train[:3])
# input("Enter key to continue... \n")
print()
auto_feat_eng_data = eu.auto_X_encoding(sltt, seed)
print()
encoding = auto_feat_eng_data['encoding']
scaler_tuple = auto_feat_eng_data['scaler']
featselector = auto_feat_eng_data['feat_selector']
steps = auto_feat_eng_data['steps']
X_train_transformed, y_train, X_test_transformed, y_test = auto_feat_eng_data['data_arrays']
X, y = auto_feat_eng_data['Xy']
train_index, test_index = auto_feat_eng_data['tt_index']
n_splits = au.select_nr_of_splits_for_kfold_cv()
# n_iter = au.select_nr_of_iterations()
print()
# This cross-validation object is a variation of KFold that returns stratified folds.
# The folds are made by preserving the percentage of samples for each class.
# uncomment to evaluate models != KerasClfs or GaussianNB w nested cv
# inner_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
outer_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
### reproducing the whole autoclf workflow
names = []
results = []
print("Metric:", scoring)
print("Calibration of untrained models -- CCCV 2nd")
print()
# Evaluation of best model with nested CV -- inner: RSCV
# dict of models and their associated parameters
# if it comes out that the best model is LogReg, no comparison is needed
# scoring == 'roc_auc' ==>
best_score = 0.5 # 0.0
best_score_dev = 0.1
best_cv_results = np.zeros(n_splits)
best_exec_time = 31536000 # one year in seconds
best_model = ('Random', None, None)
Dummy_scores = []
models_data = []
names = []
results = []
scores_of_best_model = (best_score, best_score_dev, best_cv_results,
best_exec_time, best_model)
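# scores_of_best_model layout: (score, score std, per-fold cv results, exec time, (model name, estimator, build_fn)) -- indexed positionally below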
# Start evaluation process
print()
print("=== [task] Evaluation of DummyClassifier")
print()
wtr = eu.calculate_sample_weight(y_train)
average_scores_and_best_scores = eu.single_classic_cv_evaluation(
X_train_transformed, y_train, 'DummyClf_2nd',
DummyClassifier(strategy='most_frequent'), wtr, scoring, outer_cv,
dict(), scores_of_best_model, results, names, seed)
scores_of_best_model = average_scores_and_best_scores[1]
Dummy_scores.append(scores_of_best_model[0]) # Dummy score -- ROC_AUC
Dummy_scores.append(scores_of_best_model[1]) # Dummy score std
Dummy_scores.append(scores_of_best_model[2]) # Dummy cv results
Dummy_scores.append(scores_of_best_model[3]) # Dummy execution time
# Dummy model's name and estimator
Dummy_scores.append(scores_of_best_model[4])
names = []
results = []
print()
complex_models_and_parameters = dict()
average_scores_across_outer_folds_complex = dict()
all_models_and_parameters = dict()
# Let's add some simple neural network
print("=== [task] Comparing DummyClassifier to best Keras Clf (NN)")
print()
# This is an experiment to check
# how different Keras architectures perform
# to avoid hard-coding NNs, you should determine at least
# nr of layers and nr of nodes by using Grid or Randomized Search CV
input_dim = int(X_train_transformed.shape[1])
output_dim = 1
nb_epoch = au.select_nr_of_iterations('nn')
# evaluate Keras clfs
cv_method = select_cv_method()
if cv_method == 1:
batch_size = 32
complex_models_and_parameters = create_keras_classifiers(
Y_type, input_dim, labels, nb_epoch, batch_size)
average_scores_and_best_scores = eu.classic_cv_model_evaluation(
X_train_transformed, y_train, complex_models_and_parameters, scoring,
outer_cv, average_scores_across_outer_folds_complex,
scores_of_best_model, results, names, seed)
else:
inner_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
n_iter = au.select_nr_of_iterations()
keras_clf_name = "KerasClf_2nd"
keras_nn_model, keras_param_grid = create_best_keras_clf_architecture(
keras_clf_name, Y_type, labels, input_dim, nb_epoch, Keras_param_grid)
complex_models_and_parameters[keras_clf_name] = (
keras_nn_model, keras_param_grid)
average_scores_and_best_scores = eu.nested_rscv_model_evaluation(
X_train_transformed, y_train, complex_models_and_parameters,
scoring, n_iter, inner_cv, outer_cv,
average_scores_across_outer_folds_complex, scores_of_best_model,
results, names, seed)
print()
au.box_plots_of_models_performance(results, names)
cv_method_name = "Classic" if cv_method == 1 else "Nested"
print()
print("=== After %s CV evaluation of Keras NNs..." % cv_method_name)
print()
scores_of_best_model = average_scores_and_best_scores[1]
best_model_name = scores_of_best_model[4][0]
best_model_estim = scores_of_best_model[4][1]
best_score = scores_of_best_model[0]
best_score_dev = scores_of_best_model[1]
best_cv_results = scores_of_best_model[2]
# best_brier_score = scores_of_best_model[2]
best_exec_time = scores_of_best_model[3]
Dummy_score = Dummy_scores[0]
Dummy_score_dev = Dummy_scores[1]
Dummy_cv_results = Dummy_scores[2]
# Dummy_brier_score = Dummy_scores[3]
Dummy_exec_time = Dummy_scores[3]
print()
print("Currently, best model is '%s' with score '%s': %1.3f (%1.3f)... :" %
(best_model_name, scoring.strip('neg_'), best_score, best_score_dev))
if best_model_name in (
'baseline_nn_default_Clf_2nd', 'baseline_nn_smaller_Clf_2nd',
'larger_nn_Clf_2nd', 'deep_nn_Clf_2nd', 'deeper_nn_Clf_2nd',
'KerasClf_2nd'):
best_nn_build_fn = scores_of_best_model[4][2]
print("Best build function:", best_nn_build_fn)
print("... execution time: %.2fs" % best_exec_time)
# print("and prediction confidence: %1.3f" % best_brier_score)
print()
if best_model_name != 'DummyClf_2nd':
# It's assumed best model's performance is
# statistically better than that of DummyClf on this dataset
print("DummyClassifier's scores -- '%s': %1.3f (%1.3f)" % (
scoring.strip('neg_'), Dummy_score, Dummy_score_dev))
print("'%s' does better than DummyClassifier." % best_model_name)
if best_exec_time < Dummy_exec_time:
| print("'%s' is quicker than DummyClf." % best_model_name) | conditional_block |
|
titanic_keras_exp.py |
else:
print("Invalid number. Try again...")
except ValueError as e:
print("'%s' is not a valid integer." % e.args[0].split(": ")[1])
return choice
# starting program
if __name__ == '__main__':
print("### Probability Calibration Experiment -- CalibratedClassifierCV "
"with cv=cv (no prefit) ###")
print()
d_name = ga.get_name()
if d_name is None:
d_name = "titanic"
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
# load data
try:
df = read_csv(
'datasets\\titanic_train.csv', delimiter=",",
na_values={'Age': '', 'Cabin': '', 'Embarked': ''},
dtype={'Name': 'category', 'Sex': 'category',
'Ticket': 'category', 'Cabin': 'category',
'Embarked': 'category'})
print("Found data in autoclf\\autoclf\\datasets")
except FileNotFoundError as fe:
titanic_bytes = resource_string(
"autoclf", os.path.join("datasets", 'titanic_train.csv'))
titanic_file = StringIO(str(titanic_bytes,'utf-8'))
names = ['PassengerId','Survived','Pclass','Name','Sex','Age','SibSp',
'Parch','Ticket','Fare','Cabin','Embarked']
df = read_csv(
titanic_file, delimiter=",",
# header=0, names=names,
na_values={'Age': '', 'Cabin': '', 'Embarked': ''},
dtype={'Name': 'category', 'Sex': 'category',
'Ticket': 'category', 'Cabin': 'category',
'Embarked': 'category'})
except Exception as e:
raise e
# data exploration
print("shape: ", df.shape)
# statistical summary
description = df.describe()
print("description - no encoding:\n", description)
print()
| # Feature-Feature Relationships
# scatter_matrix(df)
print()
# too many missing values in 'Cabin' columns: about 3/4
print("Dropping 'Cabin' column -- too many missing values")
# df.Cabin.replace(to_replace=np.nan, value='Unknown', inplace=True)
df.drop(['Cabin'], axis=1, inplace=True)
print()
print("Now, shape: ", df.shape)
print("df.head():\n", df.head())
print()
description = df.describe()
print("Once again, description - no encoding:\n", description)
print()
# input("Enter key to continue... \n")
target = 'Survived'
# feature preprocessing
sltt = eu.scoring_and_tt_split(df, target, 0.2, seed)
X_train, X_test, y_train, y_test = sltt['arrays']
scoring = sltt['scoring']
Y_type = sltt['target_type']
labels = sltt['labels']
print("scoring:", scoring)
print()
print("Classes:", labels)
print()
print("X_train shape: ", X_train.shape)
print("X_train -- first row:", X_train.values[0])
print("y_train shape: ", y_train.shape)
print()
print("X_test shape: ", X_test.shape)
print("X_test -- first row:", X_test.values[0])
print("y_test shape: ", y_test.shape)
print()
print("y_train:", y_train[:3])
# input("Enter key to continue... \n")
print()
auto_feat_eng_data = eu.auto_X_encoding(sltt, seed)
print()
encoding = auto_feat_eng_data['encoding']
scaler_tuple = auto_feat_eng_data['scaler']
featselector = auto_feat_eng_data['feat_selector']
steps = auto_feat_eng_data['steps']
X_train_transformed, y_train, X_test_transformed, y_test = auto_feat_eng_data['data_arrays']
X, y = auto_feat_eng_data['Xy']
train_index, test_index = auto_feat_eng_data['tt_index']
n_splits = au.select_nr_of_splits_for_kfold_cv()
# n_iter = au.select_nr_of_iterations()
print()
# This cross-validation object is a variation of KFold that returns stratified folds.
# The folds are made by preserving the percentage of samples for each class.
# uncomment to evaluate models != KerasClfs or GaussianNB w nested cv
# inner_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
outer_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
### reproducing the whole autoclf workflow
names = []
results = []
print("Metric:", scoring)
print("Calibration of untrained models -- CCCV 2nd")
print()
# Evaluation of best model with nested CV -- inner: RSCV
# dict of models and their associated parameters
# if it comes out that the best model is LogReg, no comparison is needed
# scoring == 'roc_auc' ==>
best_score = 0.5 # 0.0
best_score_dev = 0.1
best_cv_results = np.zeros(n_splits)
best_exec_time = 31536000 # one year in seconds
best_model = ('Random', None, None)
Dummy_scores = []
models_data = []
names = []
results = []
scores_of_best_model = (best_score, best_score_dev, best_cv_results,
best_exec_time, best_model)
# Start evaluation process
print()
print("=== [task] Evaluation of DummyClassifier")
print()
wtr = eu.calculate_sample_weight(y_train)
average_scores_and_best_scores = eu.single_classic_cv_evaluation(
X_train_transformed, y_train, 'DummyClf_2nd',
DummyClassifier(strategy='most_frequent'), wtr, scoring, outer_cv,
dict(), scores_of_best_model, results, names, seed)
scores_of_best_model = average_scores_and_best_scores[1]
Dummy_scores.append(scores_of_best_model[0]) # Dummy score -- ROC_AUC
Dummy_scores.append(scores_of_best_model[1]) # Dummy score std
Dummy_scores.append(scores_of_best_model[2]) # Dummy cv results
Dummy_scores.append(scores_of_best_model[3]) # Dummy execution time
# Dummy model's name and estimator
Dummy_scores.append(scores_of_best_model[4])
names = []
results = []
print()
complex_models_and_parameters = dict()
average_scores_across_outer_folds_complex = dict()
all_models_and_parameters = dict()
# Let's add some simple neural network
print("=== [task] Comparing DummyClassifier to best Keras Clf (NN)")
print()
# This is an experiment to check
# how different Keras architectures perform
# to avoid hard-coding NNs, you should determine at least
# nr of layers and nr of nodes by using Grid or Randomized Search CV
input_dim = int(X_train_transformed.shape[1])
output_dim = 1
nb_epoch = au.select_nr_of_iterations('nn')
# evaluate Keras clfs
cv_method = select_cv_method()
if cv_method == 1:
batch_size = 32
complex_models_and_parameters = create_keras_classifiers(
Y_type, input_dim, labels, nb_epoch, batch_size)
average_scores_and_best_scores = eu.classic_cv_model_evaluation(
X_train_transformed, y_train, complex_models_and_parameters, scoring,
outer_cv, average_scores_across_outer_folds_complex,
scores_of_best_model, results, names, seed)
else:
inner_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
n_iter = au.select_nr_of_iterations()
keras_clf_name = "KerasClf_2nd"
keras_nn_model, keras_param_grid = create_best_keras_clf_architecture(
keras_clf_name, Y_type, labels, input_dim, nb_epoch, Keras_param_grid)
complex_models_and_parameters[keras_clf_name] = (
keras_nn_model, keras_param_grid)
average_scores_and_best_scores = eu.nested_rscv_model_evaluation(
X_train_transformed, y_train, complex_models_and_parameters,
scoring, n_iter, inner_cv, outer_cv,
average_scores_across_outer_folds_complex, scores_of_best_model,
results, names, seed)
print()
au.box_plots_of_models_performance(results, names)
cv_method_name = "Classic" if cv_method == 1 else "Nested"
print()
print("=== After %s CV evaluation of Keras NNs..." % cv_method_name)
print()
scores_of_best_model = average_scores_and_best_scores[1]
best_model_name = scores_of_best_model[4][0]
best_model_estim = scores_of_best_model[4][1]
best_score = scores_of_best_model[0]
best_score_dev = scores_of_best_model[1]
best_cv | plt.style.use('ggplot')
# input("Enter key to continue... \n")
| random_line_split |
titanic_keras_exp.py | ():
is_valid = 0
choice = 0
while not is_valid:
try:
choice = int(input("Select cv method: [1] Classical CV, [2] Nested-CV?\n"))
if choice in (1, 2):
is_valid = 1
else:
print("Invalid number. Try again...")
except ValueError as e:
print("'%s' is not a valid integer." % e.args[0].split(": ")[1])
return choice
# starting program
if __name__ == '__main__':
print("### Probability Calibration Experiment -- CalibratedClassifierCV "
"with cv=cv (no prefit) ###")
print()
d_name = ga.get_name()
if d_name is None:
d_name = "titanic"
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
# load data
try:
df = read_csv(
'datasets\\titanic_train.csv', delimiter=",",
na_values={'Age': '', 'Cabin': '', 'Embarked': ''},
dtype={'Name': 'category', 'Sex': 'category',
'Ticket': 'category', 'Cabin': 'category',
'Embarked': 'category'})
print("Found data in autoclf\\autoclf\\datasets")
except FileNotFoundError as fe:
titanic_bytes = resource_string(
"autoclf", os.path.join("datasets", 'titanic_train.csv'))
titanic_file = StringIO(str(titanic_bytes,'utf-8'))
names = ['PassengerId','Survived','Pclass','Name','Sex','Age','SibSp',
'Parch','Ticket','Fare','Cabin','Embarked']
df = read_csv(
titanic_file, delimiter=",",
# header=0, names=names,
na_values={'Age': '', 'Cabin': '', 'Embarked': ''},
dtype={'Name': 'category', 'Sex': 'category',
'Ticket': 'category', 'Cabin': 'category',
'Embarked': 'category'})
except Exception as e:
raise e
# data exploration
print("shape: ", df.shape)
# statistical summary
description = df.describe()
print("description - no encoding:\n", description)
print()
plt.style.use('ggplot')
# input("Enter key to continue... \n")
# Feature-Feature Relationships
# scatter_matrix(df)
print()
# too many missing values in 'Cabin' columns: about 3/4
print("Dropping 'Cabin' column -- too many missing values")
# df.Cabin.replace(to_replace=np.nan, value='Unknown', inplace=True)
df.drop(['Cabin'], axis=1, inplace=True)
print()
print("Now, shape: ", df.shape)
print("df.head():\n", df.head())
print()
description = df.describe()
print("Once again, description - no encoding:\n", description)
print()
# input("Enter key to continue... \n")
target = 'Survived'
# feature preprocessing
sltt = eu.scoring_and_tt_split(df, target, 0.2, seed)
X_train, X_test, y_train, y_test = sltt['arrays']
scoring = sltt['scoring']
Y_type = sltt['target_type']
labels = sltt['labels']
print("scoring:", scoring)
print()
print("Classes:", labels)
print()
print("X_train shape: ", X_train.shape)
print("X_train -- first row:", X_train.values[0])
print("y_train shape: ", y_train.shape)
print()
print("X_test shape: ", X_test.shape)
print("X_test -- first row:", X_test.values[0])
print("y_test shape: ", y_test.shape)
print()
print("y_train:", y_train[:3])
# input("Enter key to continue... \n")
print()
auto_feat_eng_data = eu.auto_X_encoding(sltt, seed)
print()
encoding = auto_feat_eng_data['encoding']
scaler_tuple = auto_feat_eng_data['scaler']
featselector = auto_feat_eng_data['feat_selector']
steps = auto_feat_eng_data['steps']
X_train_transformed, y_train, X_test_transformed, y_test = auto_feat_eng_data['data_arrays']
X, y = auto_feat_eng_data['Xy']
train_index, test_index = auto_feat_eng_data['tt_index']
n_splits = au.select_nr_of_splits_for_kfold_cv()
# n_iter = au.select_nr_of_iterations()
print()
# This cross-validation object is a variation of KFold that returns stratified folds.
# The folds are made by preserving the percentage of samples for each class.
# uncomment to evaluate models != KerasClfs or GaussianNB w nested cv
# inner_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
outer_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
### reproducing the whole autoclf workflow
names = []
results = []
print("Metric:", scoring)
print("Calibration of untrained models -- CCCV 2nd")
print()
# Evaluation of best model with nested CV -- inner: RSCV
# dict of models and their associated parameters
# if it comes out that the best model is LogReg, no comparison is needed
# scoring == 'roc_auc' ==>
best_score = 0.5 # 0.0
best_score_dev = 0.1
best_cv_results = np.zeros(n_splits)
best_exec_time = 31536000 # one year in seconds
best_model = ('Random', None, None)
Dummy_scores = []
models_data = []
names = []
results = []
scores_of_best_model = (best_score, best_score_dev, best_cv_results,
best_exec_time, best_model)
# Start evaluation process
print()
print("=== [task] Evaluation of DummyClassifier")
print()
wtr = eu.calculate_sample_weight(y_train)
average_scores_and_best_scores = eu.single_classic_cv_evaluation(
X_train_transformed, y_train, 'DummyClf_2nd',
DummyClassifier(strategy='most_frequent'), wtr, scoring, outer_cv,
dict(), scores_of_best_model, results, names, seed)
scores_of_best_model = average_scores_and_best_scores[1]
Dummy_scores.append(scores_of_best_model[0]) # Dummy score -- ROC_AUC
Dummy_scores.append(scores_of_best_model[1]) # Dummy score std
Dummy_scores.append(scores_of_best_model[2]) # Dummy cv results
Dummy_scores.append(scores_of_best_model[3]) # Dummy execution time
# Dummy model's name and estimator
Dummy_scores.append(scores_of_best_model[4])
names = []
results = []
print()
complex_models_and_parameters = dict()
average_scores_across_outer_folds_complex = dict()
all_models_and_parameters = dict()
# Let's add some simple neural network
print("=== [task] Comparing DummyClassifier to best Keras Clf (NN)")
print()
# This is an experiment to check
# how different Keras architectures perform
# to avoid hard-coding NNs, you should determine at least
# nr of layers and nr of nodes by using Grid or Randomized Search CV
input_dim = int(X_train_transformed.shape[1])
output_dim = 1
nb_epoch = au.select_nr_of_iterations('nn')
# evaluate Keras clfs
cv_method = select_cv_method()
if cv_method == 1:
batch_size = 32
complex_models_and_parameters = create_keras_classifiers(
Y_type, input_dim, labels, nb_epoch, batch_size)
average_scores_and_best_scores = eu.classic_cv_model_evaluation(
X_train_transformed, y_train, complex_models_and_parameters, scoring,
outer_cv, average_scores_across_outer_folds_complex,
scores_of_best_model, results, names, seed)
else:
inner_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
n_iter = au.select_nr_of_iterations()
keras_clf_name = "KerasClf_2nd"
keras_nn_model, keras_param_grid = create_best_keras_clf_architecture(
keras_clf_name, Y_type, labels, input_dim, nb_epoch, Keras_param_grid)
complex_models_and_parameters[keras_clf_name] = (
keras_nn_model, keras_param_grid)
average_scores_and_best_scores = eu.nested_rscv_model_evaluation(
X_train_transformed, y_train, complex_models_and_parameters,
scoring, n_iter, inner_cv, outer_cv,
average_scores_across_outer_folds_complex, scores_of_best_model,
results, names, seed)
print()
au.box_plots_of_models_performance(results, names)
cv_method_name = "Classic" if cv_method == 1 else "Nested"
print()
print("=== After %s CV evaluation of Keras NNs..." % cv_method_name)
print()
scores | select_cv_method | identifier_name |
|
titanic_keras_exp.py |
# starting program
if __name__ == '__main__':
print("### Probability Calibration Experiment -- CalibratedClassifierCV "
"with cv=cv (no prefit) ###")
print()
d_name = ga.get_name()
if d_name is None:
d_name = "titanic"
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
# load data
try:
df = read_csv(
'datasets\\titanic_train.csv', delimiter=",",
na_values={'Age': '', 'Cabin': '', 'Embarked': ''},
dtype={'Name': 'category', 'Sex': 'category',
'Ticket': 'category', 'Cabin': 'category',
'Embarked': 'category'})
print("Found data in autoclf\\autoclf\\datasets")
except FileNotFoundError as fe:
titanic_bytes = resource_string(
"autoclf", os.path.join("datasets", 'titanic_train.csv'))
titanic_file = StringIO(str(titanic_bytes,'utf-8'))
names = ['PassengerId','Survived','Pclass','Name','Sex','Age','SibSp',
'Parch','Ticket','Fare','Cabin','Embarked']
df = read_csv(
titanic_file, delimiter=",",
# header=0, names=names,
na_values={'Age': '', 'Cabin': '', 'Embarked': ''},
dtype={'Name': 'category', 'Sex': 'category',
'Ticket': 'category', 'Cabin': 'category',
'Embarked': 'category'})
except Exception as e:
raise e
# data exploration
print("shape: ", df.shape)
# statistical summary
description = df.describe()
print("description - no encoding:\n", description)
print()
plt.style.use('ggplot')
# input("Enter key to continue... \n")
# Feature-Feature Relationships
# scatter_matrix(df)
print()
# too many missing values in 'Cabin' columns: about 3/4
print("Dropping 'Cabin' column -- too many missing values")
# df.Cabin.replace(to_replace=np.nan, value='Unknown', inplace=True)
df.drop(['Cabin'], axis=1, inplace=True)
print()
print("Now, shape: ", df.shape)
print("df.head():\n", df.head())
print()
description = df.describe()
print("Once again, description - no encoding:\n", description)
print()
# input("Enter key to continue... \n")
target = 'Survived'
# feature preprocessing
sltt = eu.scoring_and_tt_split(df, target, 0.2, seed)
X_train, X_test, y_train, y_test = sltt['arrays']
scoring = sltt['scoring']
Y_type = sltt['target_type']
labels = sltt['labels']
print("scoring:", scoring)
print()
print("Classes:", labels)
print()
print("X_train shape: ", X_train.shape)
print("X_train -- first row:", X_train.values[0])
print("y_train shape: ", y_train.shape)
print()
print("X_test shape: ", X_test.shape)
print("X_test -- first row:", X_test.values[0])
print("y_test shape: ", y_test.shape)
print()
print("y_train:", y_train[:3])
# input("Enter key to continue... \n")
print()
auto_feat_eng_data = eu.auto_X_encoding(sltt, seed)
print()
encoding = auto_feat_eng_data['encoding']
scaler_tuple = auto_feat_eng_data['scaler']
featselector = auto_feat_eng_data['feat_selector']
steps = auto_feat_eng_data['steps']
X_train_transformed, y_train, X_test_transformed, y_test = auto_feat_eng_data['data_arrays']
X, y = auto_feat_eng_data['Xy']
train_index, test_index = auto_feat_eng_data['tt_index']
n_splits = au.select_nr_of_splits_for_kfold_cv()
# n_iter = au.select_nr_of_iterations()
print()
# This cross-validation object is a variation of KFold that returns stratified folds.
# The folds are made by preserving the percentage of samples for each class.
# uncomment to evaluate models != KerasClfs or GaussianNB w nested cv
# inner_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
outer_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
### reproducing the whole autoclf workflow
names = []
results = []
print("Metric:", scoring)
print("Calibration of untrained models -- CCCV 2nd")
print()
# Evaluation of best model with nested CV -- inner: RSCV
# dict of models and their associated parameters
# if it comes out that the best model is LogReg, no comparison is needed
# scoring == 'roc_auc' ==>
best_score = 0.5 # 0.0
best_score_dev = 0.1
best_cv_results = np.zeros(n_splits)
best_exec_time = 31536000 # one year in seconds
best_model = ('Random', None, None)
Dummy_scores = []
models_data = []
names = []
results = []
scores_of_best_model = (best_score, best_score_dev, best_cv_results,
best_exec_time, best_model)
# Start evaluation process
print()
print("=== [task] Evaluation of DummyClassifier")
print()
wtr = eu.calculate_sample_weight(y_train)
average_scores_and_best_scores = eu.single_classic_cv_evaluation(
X_train_transformed, y_train, 'DummyClf_2nd',
DummyClassifier(strategy='most_frequent'), wtr, scoring, outer_cv,
dict(), scores_of_best_model, results, names, seed)
scores_of_best_model = average_scores_and_best_scores[1]
Dummy_scores.append(scores_of_best_model[0]) # Dummy score -- ROC_AUC
Dummy_scores.append(scores_of_best_model[1]) # Dummy score std
Dummy_scores.append(scores_of_best_model[2]) # Dummy cv results
Dummy_scores.append(scores_of_best_model[3]) # Dummy execution time
# Dummy model's name and estimator
Dummy_scores.append(scores_of_best_model[4])
names = []
results = []
print()
complex_models_and_parameters = dict()
average_scores_across_outer_folds_complex = dict()
all_models_and_parameters = dict()
# Let's add some simple neural network
print("=== [task] Comparing DummyClassifier to best Keras Clf (NN)")
print()
# This is an experiment to check
# how different Keras architectures perform
# to avoid hard-coding NNs, you should determine at least
# nr of layers and nr of nodes by using Grid or Randomized Search CV
input_dim = int(X_train_transformed.shape[1])
output_dim = 1
nb_epoch = au.select_nr_of_iterations('nn')
# evaluate Keras clfs
cv_method = select_cv_method()
if cv_method == 1:
batch_size = 32
complex_models_and_parameters = create_keras_classifiers(
Y_type, input_dim, labels, nb_epoch, batch_size)
average_scores_and_best_scores = eu.classic_cv_model_evaluation(
X_train_transformed, y_train, complex_models_and_parameters, scoring,
outer_cv, average_scores_across_outer_folds_complex,
scores_of_best_model, results, names, seed)
else:
inner_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
n_iter = au.select_nr_of_iterations()
keras_clf_name = "KerasClf_2nd"
keras_nn_model, keras_param_grid = create_best_keras_clf_architecture(
keras_clf_name, Y_type, labels, input_dim, nb_epoch, Keras_param_grid)
complex_models_and_parameters[keras_clf_name] = (
keras_nn_model, keras_param_grid)
average_scores_and_best_scores = eu.nested_rscv_model_evaluation(
X_train_transformed, y_train, complex_models_and_parameters,
scoring, n_iter, inner_cv, outer_cv,
average_scores_across_outer_folds_complex, scores_of_best_model,
results, names, seed)
print()
au.box_plots_of_models_performance(results, names)
cv_method_name = "Classic" if cv_method == 1 else "Nested"
print()
print("=== After %s CV evaluation of Keras NNs..." % cv_method_name)
print()
scores_of_best_model = average | is_valid = 0
choice = 0
while not is_valid:
try:
choice = int(input("Select cv method: [1] Classical CV, [2] Nested-CV?\n"))
if choice in (1, 2):
is_valid = 1
else:
print("Invalid number. Try again...")
except ValueError as e:
print("'%s' is not a valid integer." % e.args[0].split(": ")[1])
return choice | identifier_body |
|
AUTO_ASSAM_PART3.py | 0]].append(d[1]+"("+d[2]+")")
dict_indication_mappings[d[0]].append(d[3])
#dictionary of source organism
dict_source_mappings = collections.defaultdict(list)
for s in rows2: dict_source_mappings[s[0].lower()+s[1]].append(s[3])
#dictionary of pdb-macromolecule mappings
dict_macromolecule_mappings = collections.defaultdict(list)
for m in rows2: dict_macromolecule_mappings[m[0].lower()+m[1]].append(m[2])
#pfam annotation
pfam_annotation = collections.defaultdict(list)
for p in rows2: pfam_annotation[p[0].lower()+p[1]].append(p[4])
csv_writer = csv.writer(open("BINDING_INTERFACES.csv","w",),delimiter=",")
for test_file in glob.glob("BINDING_SITES/*.pdb"):
pdbs = test_file.split("/")[-1]
pdb = pdbs[:4].lower() #pdbid
dreposed_id = pdbs.replace(".pdb","").upper() #drreposed id
binding_residues = ";".join(sorted(list(set([line[17:26] for line in open(test_file,"r") if line[:4] == "ATOM"])),key=lambda x:int(x[5:]))) #binding residues in 'ATOM' record
hetatm_residues = list(set([line[17:26] for line in open(test_file,"r").readlines() if line[:6] == "HETATM"]))[0] #drug molecule in 'HETATM' record
pdb_ligand_id = hetatm_residues[:3].replace(" ","") #pdb ligand id
pdbchains0 = sorted(list(set([pdb+line[21:22] for line in open(test_file,"r").readlines()])))
drugbank_id = ";".join(sorted(list(set(dict_drugbank_mappings[pdb_ligand_id.replace(" ","")])))) #drugbank id
if drugbank_id == "": drugbank_id = "-"
indications = ";".join(sorted(list(set(dict_indication_mappings[pdb_ligand_id]))))
organism = ";".join(sorted(list(set([k for n in [dict_source_mappings[pdbchain] for pdbchain in pdbchains0] for k in n])))).replace(",",";")
if organism == "": organism = "-"
macromolecule = ";".join(sorted(list(set([k for n in [dict_macromolecule_mappings[pdbchain] for pdbchain in pdbchains0] for k in n])))).upper()
if macromolecule == "": macromolecule = "-"
pfam_id = ";".join(sorted(list(set([k for n in [pfam_annotation[pdbchain] for pdbchain in pdbchains0] for k in n]))))
compiled_details = [dreposed_id]+[pdb]+[pdb_ligand_id]+[drugbank_id]+[indications]+[organism]+[macromolecule]+[pfam_id]+[binding_residues]+[hetatm_residues] #arrangement in csv file
#print compiled_details
csv_writer.writerow(compiled_details)
csv_writer2 = csv.writer(open("BINDING_INTERFACES_CLUSTERS.csv","w",),delimiter=",")
for test_file in glob.glob("BINDING_SITES_CLUSTERS/*.pdb"):
pdbs = test_file.split("/")[-1]
pdb = pdbs[:4].lower() #pdbid
dreposed_id = pdbs.replace(".pdb","").upper() #drreposed id
binding_residues = ";".join(sorted(list(set([line[17:26] for line in open(test_file,"r") if line[:4] == "ATOM"])),key=lambda x:int(x[5:]))) #binding residues in 'ATOM' record
hetatm_residues = list(set([line[17:26] for line in open(test_file,"r").readlines() if line[:6] == "HETATM"]))[0] #drug molecule in 'HETATM' record
pdb_ligand_id = hetatm_residues[:3].replace(" ","") #pdb ligand id
pdbchains0 = sorted(list(set([pdb+line[21:22] for line in open(test_file,"r").readlines()])))
drugbank_id = ";".join(sorted(list(set(dict_drugbank_mappings[pdb_ligand_id.replace(" ","")])))) #drugbank id
if drugbank_id == "": drugbank_id = "-"
indications = ";".join(sorted(list(set(dict_indication_mappings[pdb_ligand_id]))))
organism = ";".join(sorted(list(set([k for n in [dict_source_mappings[pdbchain] for pdbchain in pdbchains0] for k in n])))).replace(",",";")
if organism == "": organism = "-"
macromolecule = ";".join(sorted(list(set([k for n in [dict_macromolecule_mappings[pdbchain] for pdbchain in pdbchains0] for k in n])))).upper()
if macromolecule == "": macromolecule = "-"
pfam_id = ";".join(sorted(list(set([k for n in [pfam_annotation[pdbchain] for pdbchain in pdbchains0] for k in n]))))
compiled_details = [dreposed_id]+[pdb]+[pdb_ligand_id]+[drugbank_id]+[indications]+[organism]+[macromolecule]+[pfam_id]+[binding_residues]+[hetatm_residues] #arrangement in csv file
#print compiled_details
csv_writer2.writerow(compiled_details)
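# change_het_res: reformat a ';'-joined list of "<distance> from <residue>" strings, keeping contacts under 5.0 A as "<residue> (<distance>)"; returns "None" if nothing qualifies or the text mentions "site" / lacks "from"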
def change_het_res(hetatm_residues):
if "from" in hetatm_residues:
if "site" not in hetatm_residues:
rr = hetatm_residues
het_resx = [" ".join([r.split(" from ")[1].replace("s","")[-9:]]+["("+r.split(" from ")[0].replace(" ","")+")" if "-" in r.split(" from ")[0] else "( "+r.split(" from ")[0].replace(" ","")+")" ]) if float(re.findall("[0-9]+[.][0-9]+",r.split(" from ")[0])[0])<5.0 else "None" for r in rr.split(";")]
if all(item == "None" for item in het_resx) == True: het_res = "None"
else: het_res = ";".join(het_resx)
else: het_res = "None"
else: het_res = "None"
return het_res
#BINDING_INTERFACES_ASSAM
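# renumber each binding-site file as "<first three underscore-separated name fields, uppercased>_<index>" and substitute the new ID into column 0 of the ASSAM *_sum_2.csv rows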
def save_output_binding_interfaces_assam():
pdb_dict=collections.defaultdict(list)
pdb_new_dict=collections.defaultdict(list)
pdbs_all=[["_".join(i.split("/")[-1].split("_")[:3]).upper()]+[i.split("/")[-1].replace(".pdb","")] for i in glob.glob("renew_bs_finalized/*.pdb")]
for pdb in pdbs_all: pdb_dict[pdb[0]].append(pdb[1])
for k in pdb_dict.keys():
for index, pdb in enumerate(pdb_dict[k]):
pdb_new_dict[pdb].append(k+"_"+str(index))
csv_writer=csv.writer(open("BINDING_INTERFACES_ASSAM.csv","w"),delimiter=",")
all_cs=[cs for cs in glob.glob("/Users/nursyatila/NSAG_PART2/ASS_EXE/drreposer_output/*/*/*_sum_2.csv")]
for cs in all_cs:
csv_readerx=[pdb_new_dict[row[0]]+row[1:] for row in csv.reader(open(cs,"r"),delimiter=",")]
#rowsx=[pdb_new_dict[row[0]]+row[1:6]+[";".join([change_het_res(hetatm_residues) for hetatm_residues in row[6].split(";")])]+row[7:] for row in csv_readerx]
for r in csv_readerx:
csv_writer.writerow(r)
#BINDING_INTERFACES_EXACT
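# keep only ASSAM rows whose number of ';'-separated residues in column 5 equals the residue count of the query site (column 8 of BINDING_INTERFACES_CLUSTERS.csv); that count is prepended to each kept row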
def binding_interfaces_exact():
csv_readerx=csv.reader(open("BINDING_INTERFACES_CLUSTERS.csv","r"),delimiter=",")
rowsx=[[row[0]]+[row[8]] for row in csv_readerx]
dreposer_id_dict=collections.defaultdict(list)
for r in rowsx: dreposer_id_dict[r[0]].append(len(r[1].split(";")))
csv_readerxx=csv.reader(open("BINDING_INTERFACES_ASSAM.csv","r"),delimiter=",")
rowsxx=[row for row in csv_readerxx if int(dreposer_id_dict[row[0]][0])==len(row[5].split(";"))]
csv_writerxx=csv.writer(open("BINDING_INTERFACES_ASSAM_EXACT.csv","w"),delimiter=",")
for r in rowsxx:
csv_writerxx.writerow([str(dreposer_id_dict[r[0]][0])]+r)
#=========PART 3b: SAVE PDB-FORMATTED AND GRAPH PATTERNS FOR SPRITE SEARCHES===========
#rename pdb file containing binding sites
def | rename_pdb_dbs | identifier_name |
|
AUTO_ASSAM_PART3.py | #A program to parse user.LP
import itertools, sys, os, subprocess, shutil, glob, numpy as np, re, collections, operator, datetime, optparse, csv
#import Bio
trans = {'ALA':'A','CYS':'C','CYH':'C','CSS':'C','ASP':'D','GLU':'E','PHE':'F','GLY':'G','HIS':'H','ILE':'I','LYS':'K','LEU':'L','MET':'M','ASN':'N','PRO':'P','GLN':'Q','ARG':'R','SER':'S','THR':'T','VAL':'V','TRP':'W','TYR':'Y','UNK':'X','MSE':'M'}
atom_list = {
"ALA":('N ','C ','O ','CA ','CB '), "CYS":('N ','C ','O ','CA ','CB ','SG '),"ASP":('N ','C ','O ','CA ','CB ','CG ','OD1','OD2'),"GLU":('N ','C ','O ','CA ','CB ','CG ','CD ','OE1','OE2'),"PHE":('N ','C ','O ','CA ','CB ','CG ','CD1','CD2','CE1','CE2','CZ '),"GLY":('N ','C ','O ','CA '),"HIS":('N ','C ','O ','CA ','CB ','CG ','ND1','CD2','CE1','NE2'),
"ILE":('N ','C ','O ','CA ','CB ','CG1','CG2','CD1'),"LYS":('N ','C ','O ','CA ','CB ','CG ','CD ','CE ','NZ '),"LEU":('N ','C ','O ','CA ','CB ','CG ','CD1','CD2'),"MET":('N ','C ','O ','CA ','CB ','CG ','SD ','CE '),"ASN":('N ','C ','O ','CA ','CB ','CG ','OD1','ND2'),"PRO":('N ','C ','O ','CA ','CB ','CG ','CD '),"GLN":('N ','C ','O ','CA ','CB ','CG ','CD ','OE1','NE2'),"ARG":('N ','C ','O ','CA ','CB ','CG ','CD ','NE ','CZ ','NH1','NH2'),
"SER":('N ','C ','O ','CA ','CB ','OG '),"THR":('N ','C ','O ','CA ','CB ','OG1','CG2'),"VAL":('N ','C ','O ','CA ','CB ','CG1','CG2'),"TRP":('N ','C ','O ','CA ','CB ','CG ','CD1','CD2','NE1','CE2','CE3','CZ2','CZ3','CH2'),"TYR":('N ','C ','O ','CA ','CB ','CG ','CD1','CD2','CE1','CE2','CZ ','OH ')}
#=========PART 3a: SAVE INFORMATION===========
#save information of annotated drug binding sites for Drug ReposER application
#BINDING_INTERFACES_NEW, BINDING_INTERFACES_CLUSTERS, BINDING_INTERFACES_ASSAM, BINDING_INTERFACES_EXACT
def save_output_binding_interfaces():
csv_reader=csv.reader(open("pdbdescription.csv","r"),delimiter=",")
rows=[row for row in csv_reader][1:-2]
rows2=[row[:4]+[";".join(sorted(list(set([z+"("+y+")" for z,y in zip(list(set([n.split()[0] for n in row[4].split(", ")])),row[5].split(", "))])))) if row[4] != "" else "None"]+row[6:] for row in rows]
drugbank_mappings = [[i.replace("\n","").split("\t")[0]]+[i.replace("\n","").split("\t")[1]+"("+i.replace("\n","").split("\t")[2]+")"] for i in open("/Users/nursyatila/NSAG_PART2/db/pdb_drugbank.txt","r").readlines()]
#dictionary of drugbank annotation
dict_drugbank_mappings = collections.defaultdict(list)
#dictionary of drug indication
dict_indication_mappings = collections.defaultdict(list)
for d in drugbank_mappings:
dict_drugbank_mappings[d[0]].append(d[1]+"("+d[2]+")")
dict_indication_mappings[d[0]].append(d[3])
#dictionary of source organism
dict_source_mappings = collections.defaultdict(list)
for s in rows2: dict_source_mappings[s[0].lower()+s[1]].append(s[3])
#dictionary of pdb-macromolecule mappings
dict_macromolecule_mappings = collections.defaultdict(list)
for m in rows2: dict_macromolecule_mappings[m[0].lower()+m[1]].append(m[2])
#pfam annotation
pfam_annotation = collections.defaultdict(list)
for p in rows2: pfam_annotation[p[0].lower()+p[1]].append(p[4])
csv_writer = csv.writer(open("BINDING_INTERFACES.csv","w",),delimiter=",")
for test_file in glob.glob("BINDING_SITES/*.pdb"):
pdbs = test_file.split("/")[-1]
pdb = pdbs[:4].lower() #pdbid
dreposed_id = pdbs.replace(".pdb","").upper() #drreposed id
binding_residues = ";".join(sorted(list(set([line[17:26] for line in open(test_file,"r") if line[:4] == "ATOM"])),key=lambda x:int(x[5:]))) #binding residues in 'ATOM' record
hetatm_residues = list(set([line[17:26] for line in open(test_file,"r").readlines() if line[:6] == "HETATM"]))[0] #drug molecule in 'HETATM' record
pdb_ligand_id = hetatm_residues[:3].replace(" ","") #pdb ligand id
pdbchains0 = sorted(list(set([pdb+line[21:22] for line in open(test_file,"r").readlines()])))
drugbank_id = ";".join(sorted(list(set(dict_drugbank_mappings[pdb_ligand_id.replace(" ","")])))) #drugbank id
if drugbank_id == "": drugbank_id = "-"
indications = ";".join(sorted(list(set(dict_indication_mappings[pdb_ligand_id]))))
organism = ";".join(sorted(list(set([k for n in [dict_source_mappings[pdbchain] for pdbchain in pdbchains0] for k in n])))).replace(",",";")
if organism == "": organism = "-"
macromolecule = ";".join(sorted(list(set([k for n in [dict_macromolecule_mappings[pdbchain] for pdbchain in pdbchains0] for k in n])))).upper()
if macromolecule == "": macromolecule = "-"
pfam_id = ";".join(sorted(list(set([k for n in [pfam_annotation[pdbchain] for pdbchain in pdbchains0] for k in n]))))
compiled_details = [dreposed_id]+[pdb]+[pdb_ligand_id]+[drugbank_id]+[indications]+[organism]+[macromolecule]+[pfam_id]+[binding_residues]+[hetatm_residues] #arrangement in csv file
#print compiled_details
csv_writer.writerow(compiled_details)
csv_writer2 = csv.writer(open("BINDING_INTERFACES_CLUSTERS.csv","w",),delimiter=",")
for test_file in glob.glob("BINDING_SITES_CLUSTERS/*.pdb"):
pdbs = test_file.split("/")[-1]
pdb = pdbs[:4].lower() #pdbid
dreposed_id = pdbs.replace(".pdb","").upper() #drreposed id
binding_residues = ";".join(sorted(list(set([line[17:26] for line in open(test_file,"r") if line[:4] == "ATOM"])),key=lambda x:int(x[5:]))) #binding residues in 'ATOM' record
hetatm_residues = list(set([line[17:26] for line in open(test_file,"r").readlines() if line[:6] == "HETATM"]))[0] #drug molecule in 'HETATM' record
pdb_ligand_id = hetatm_residues[:3].replace(" ","") #pdb ligand id
pdbchains0 = sorted(list(set([pdb+line[21:22] for line in open(test_file,"r").readlines()])))
drugbank_id = ";".join(sorted(list(set(dict_drugbank_mappings[pdb_ligand_id.replace(" ","")])))) #drugbank id
if drugbank_id == "": drugbank_id = "-"
indications = ";".join(sorted(list(set(dict_indication_mappings[pdb_l | #!/usr/bin/env python
# -*- coding: utf-8 -*- | random_line_split |
|
AUTO_ASSAM_PART3.py | ','CA ','CB ','CG ','OD1','ND2'),"PRO":('N ','C ','O ','CA ','CB ','CG ','CD '),"GLN":('N ','C ','O ','CA ','CB ','CG ','CD ','OE1','NE2'),"ARG":('N ','C ','O ','CA ','CB ','CG ','CD ','NE ','CZ ','NH1','NH2'),
"SER":('N ','C ','O ','CA ','CB ','OG '),"THR":('N ','C ','O ','CA ','CB ','OG1','CG2'),"VAL":('N ','C ','O ','CA ','CB ','CG1','CG2'),"TRP":('N ','C ','O ','CA ','CB ','CG ','CD1','CD2','NE1','CE2','CE3','CZ2','CZ3','CH2'),"TYR":('N ','C ','O ','CA ','CB ','CG ','CD1','CD2','CE1','CE2','CZ ','OH ')}
#=========PART 3a: SAVE INFORMATION===========
#save information of annotated drug binding sites for Drug ReposER application
#BINDING_INTERFACES_NEW, BINDING_INTERFACES_CLUSTERS, BINDING_INTERFACES_ASSAM, BINDING_INTERFACES_EXACT
def save_output_binding_interfaces():
csv_reader=csv.reader(open("pdbdescription.csv","r"),delimiter=",")
rows=[row for row in csv_reader][1:-2]
rows2=[row[:4]+[";".join(sorted(list(set([z+"("+y+")" for z,y in zip(list(set([n.split()[0] for n in row[4].split(", ")])),row[5].split(", "))])))) if row[4] != "" else "None"]+row[6:] for row in rows]
drugbank_mappings = [[i.replace("\n","").split("\t")[0]]+[i.replace("\n","").split("\t")[1]+"("+i.replace("\n","").split("\t")[2]+")"] for i in open("/Users/nursyatila/NSAG_PART2/db/pdb_drugbank.txt","r").readlines()]
#dictionary of drugbank annotation
dict_drugbank_mappings = collections.defaultdict(list)
#dictionary of drug indication
dict_indication_mappings = collections.defaultdict(list)
for d in drugbank_mappings:
dict_drugbank_mappings[d[0]].append(d[1]+"("+d[2]+")")
dict_indication_mappings[d[0]].append(d[3])
#dictionary of source organism
dict_source_mappings = collections.defaultdict(list)
for s in rows2: dict_source_mappings[s[0].lower()+s[1]].append(s[3])
#dictionary of pdb-macromolecule mappings
dict_macromolecule_mappings = collections.defaultdict(list)
for m in rows2: dict_macromolecule_mappings[m[0].lower()+m[1]].append(m[2])
#pfam annotation
pfam_annotation = collections.defaultdict(list)
for p in rows2: pfam_annotation[p[0].lower()+p[1]].append(p[4])
csv_writer = csv.writer(open("BINDING_INTERFACES.csv","w",),delimiter=",")
for test_file in glob.glob("BINDING_SITES/*.pdb"):
pdbs = test_file.split("/")[-1]
pdb = pdbs[:4].lower() #pdbid
dreposed_id = pdbs.replace(".pdb","").upper() #drreposed id
binding_residues = ";".join(sorted(list(set([line[17:26] for line in open(test_file,"r") if line[:4] == "ATOM"])),key=lambda x:int(x[5:]))) #binding residues in 'ATOM' record
hetatm_residues = list(set([line[17:26] for line in open(test_file,"r").readlines() if line[:6] == "HETATM"]))[0] #drug molecule in 'HETATM' record
pdb_ligand_id = hetatm_residues[:3].replace(" ","") #pdb ligand id
pdbchains0 = sorted(list(set([pdb+line[21:22] for line in open(test_file,"r").readlines()])))
drugbank_id = ";".join(sorted(list(set(dict_drugbank_mappings[pdb_ligand_id.replace(" ","")])))) #drugbank id
if drugbank_id == "": drugbank_id = "-"
indications = ";".join(sorted(list(set(dict_indication_mappings[pdb_ligand_id]))))
organism = ";".join(sorted(list(set([k for n in [dict_source_mappings[pdbchain] for pdbchain in pdbchains0] for k in n])))).replace(",",";")
if organism == "": organism = "-"
macromolecule = ";".join(sorted(list(set([k for n in [dict_macromolecule_mappings[pdbchain] for pdbchain in pdbchains0] for k in n])))).upper()
if macromolecule == "": macromolecule = "-"
pfam_id = ";".join(sorted(list(set([k for n in [pfam_annotation[pdbchain] for pdbchain in pdbchains0] for k in n]))))
compiled_details = [dreposed_id]+[pdb]+[pdb_ligand_id]+[drugbank_id]+[indications]+[organism]+[macromolecule]+[pfam_id]+[binding_residues]+[hetatm_residues] #arrangement in csv file
#print compiled_details
csv_writer.writerow(compiled_details)
csv_writer2 = csv.writer(open("BINDING_INTERFACES_CLUSTERS.csv","w",),delimiter=",")
for test_file in glob.glob("BINDING_SITES_CLUSTERS/*.pdb"):
pdbs = test_file.split("/")[-1]
pdb = pdbs[:4].lower() #pdbid
dreposed_id = pdbs.replace(".pdb","").upper() #drreposed id
binding_residues = ";".join(sorted(list(set([line[17:26] for line in open(test_file,"r") if line[:4] == "ATOM"])),key=lambda x:int(x[5:]))) #binding residues in 'ATOM' record
hetatm_residues = list(set([line[17:26] for line in open(test_file,"r").readlines() if line[:6] == "HETATM"]))[0] #drug molecule in 'HETATM' record
pdb_ligand_id = hetatm_residues[:3].replace(" ","") #pdb ligand id
pdbchains0 = sorted(list(set([pdb+line[21:22] for line in open(test_file,"r").readlines()])))
drugbank_id = ";".join(sorted(list(set(dict_drugbank_mappings[pdb_ligand_id.replace(" ","")])))) #drugbank id
if drugbank_id == "": drugbank_id = "-"
indications = ";".join(sorted(list(set(dict_indication_mappings[pdb_ligand_id]))))
organism = ";".join(sorted(list(set([k for n in [dict_source_mappings[pdbchain] for pdbchain in pdbchains0] for k in n])))).replace(",",";")
if organism == "": organism = "-"
macromolecule = ";".join(sorted(list(set([k for n in [dict_macromolecule_mappings[pdbchain] for pdbchain in pdbchains0] for k in n])))).upper()
if macromolecule == "": macromolecule = "-"
pfam_id = ";".join(sorted(list(set([k for n in [pfam_annotation[pdbchain] for pdbchain in pdbchains0] for k in n]))))
compiled_details = [dreposed_id]+[pdb]+[pdb_ligand_id]+[drugbank_id]+[indications]+[organism]+[macromolecule]+[pfam_id]+[binding_residues]+[hetatm_residues] #arrangement in csv file
#print compiled_details
csv_writer2.writerow(compiled_details)
def change_het_res(hetatm_residues):
if "from" in hetatm_residues:
if "site" not in hetatm_residues:
rr = hetatm_residues
het_resx = [" ".join([r.split(" from ")[1].replace("s","")[-9:]]+["("+r.split(" from ")[0].replace(" ","")+")" if "-" in r.split(" from ")[0] else "( "+r.split(" from ")[0].replace(" ","")+")" ]) if float(re.findall("[0-9]+[.][0-9]+",r.split(" from ")[0])[0])<5.0 else "None" for r in rr.split(";")]
if all(item == "None" for item in het_resx) == True: |
else: het_res = ";".join(het_resx)
else: het_res = "None"
else: het_res = "None"
return het_res
#BINDING_INTERFACES_ASSAM
def | het_res = "None" | conditional_block |
AUTO_ASSAM_PART3.py | save_output_binding_interfaces():
csv_reader=csv.reader(open("pdbdescription.csv","r"),delimiter=",")
rows=[row for row in csv_reader][1:-2]
rows2=[row[:4]+[";".join(sorted(list(set([z+"("+y+")" for z,y in zip(list(set([n.split()[0] for n in row[4].split(", ")])),row[5].split(", "))])))) if row[4] != "" else "None"]+row[6:] for row in rows]
drugbank_mappings = [[i.replace("\n","").split("\t")[0]]+[i.replace("\n","").split("\t")[1]+"("+i.replace("\n","").split("\t")[2]+")"] for i in open("/Users/nursyatila/NSAG_PART2/db/pdb_drugbank.txt","r").readlines()]
#dictionary of drugbank annotation
dict_drugbank_mappings = collections.defaultdict(list)
#dictionary of drug indication
dict_indication_mappings = collections.defaultdict(list)
for d in drugbank_mappings:
dict_drugbank_mappings[d[0]].append(d[1]+"("+d[2]+")")
dict_indication_mappings[d[0]].append(d[3])
#dictionary of source organism
dict_source_mappings = collections.defaultdict(list)
for s in rows2: dict_source_mappings[s[0].lower()+s[1]].append(s[3])
#dictionary of pdb-macromolecule mappings
dict_macromolecule_mappings = collections.defaultdict(list)
for m in rows2: dict_macromolecule_mappings[m[0].lower()+m[1]].append(m[2])
#pfam annotation
pfam_annotation = collections.defaultdict(list)
for p in rows2: pfam_annotation[p[0].lower()+p[1]].append(p[4])
csv_writer = csv.writer(open("BINDING_INTERFACES.csv","w",),delimiter=",")
for test_file in glob.glob("BINDING_SITES/*.pdb"):
pdbs = test_file.split("/")[-1]
pdb = pdbs[:4].lower() #pdbid
dreposed_id = pdbs.replace(".pdb","").upper() #drreposed id
binding_residues = ";".join(sorted(list(set([line[17:26] for line in open(test_file,"r") if line[:4] == "ATOM"])),key=lambda x:int(x[5:]))) #binding residues in 'ATOM' record
hetatm_residues = list(set([line[17:26] for line in open(test_file,"r").readlines() if line[:6] == "HETATM"]))[0] #drug molecule in 'HETATM' record
pdb_ligand_id = hetatm_residues[:3].replace(" ","") #pdb ligand id
pdbchains0 = sorted(list(set([pdb+line[21:22] for line in open(test_file,"r").readlines()])))
drugbank_id = ";".join(sorted(list(set(dict_drugbank_mappings[pdb_ligand_id.replace(" ","")])))) #drugbank id
if drugbank_id == "": drugbank_id = "-"
indications = ";".join(sorted(list(set(dict_indication_mappings[pdb_ligand_id]))))
organism = ";".join(sorted(list(set([k for n in [dict_source_mappings[pdbchain] for pdbchain in pdbchains0] for k in n])))).replace(",",";")
if organism == "": organism = "-"
macromolecule = ";".join(sorted(list(set([k for n in [dict_macromolecule_mappings[pdbchain] for pdbchain in pdbchains0] for k in n])))).upper()
if macromolecule == "": macromolecule = "-"
pfam_id = ";".join(sorted(list(set([k for n in [pfam_annotation[pdbchain] for pdbchain in pdbchains0] for k in n]))))
compiled_details = [dreposed_id]+[pdb]+[pdb_ligand_id]+[drugbank_id]+[indications]+[organism]+[macromolecule]+[pfam_id]+[binding_residues]+[hetatm_residues] #arrangement in csv file
#print compiled_details
csv_writer.writerow(compiled_details)
csv_writer2 = csv.writer(open("BINDING_INTERFACES_CLUSTERS.csv","w",),delimiter=",")
for test_file in glob.glob("BINDING_SITES_CLUSTERS/*.pdb"):
pdbs = test_file.split("/")[-1]
pdb = pdbs[:4].lower() #pdbid
dreposed_id = pdbs.replace(".pdb","").upper() #drreposed id
binding_residues = ";".join(sorted(list(set([line[17:26] for line in open(test_file,"r") if line[:4] == "ATOM"])),key=lambda x:int(x[5:]))) #binding residues in 'ATOM' record
hetatm_residues = list(set([line[17:26] for line in open(test_file,"r").readlines() if line[:6] == "HETATM"]))[0] #drug molecule in 'HETATM' record
pdb_ligand_id = hetatm_residues[:3].replace(" ","") #pdb ligand id
pdbchains0 = sorted(list(set([pdb+line[21:22] for line in open(test_file,"r").readlines()])))
drugbank_id = ";".join(sorted(list(set(dict_drugbank_mappings[pdb_ligand_id.replace(" ","")])))) #drugbank id
if drugbank_id == "": drugbank_id = "-"
indications = ";".join(sorted(list(set(dict_indication_mappings[pdb_ligand_id]))))
organism = ";".join(sorted(list(set([k for n in [dict_source_mappings[pdbchain] for pdbchain in pdbchains0] for k in n])))).replace(",",";")
if organism == "": organism = "-"
macromolecule = ";".join(sorted(list(set([k for n in [dict_macromolecule_mappings[pdbchain] for pdbchain in pdbchains0] for k in n])))).upper()
if macromolecule == "": macromolecule = "-"
pfam_id = ";".join(sorted(list(set([k for n in [pfam_annotation[pdbchain] for pdbchain in pdbchains0] for k in n]))))
compiled_details = [dreposed_id]+[pdb]+[pdb_ligand_id]+[drugbank_id]+[indications]+[organism]+[macromolecule]+[pfam_id]+[binding_residues]+[hetatm_residues] #arrangement in csv file
#print compiled_details
csv_writer2.writerow(compiled_details)
def change_het_res(hetatm_residues):
if "from" in hetatm_residues:
if "site" not in hetatm_residues:
rr = hetatm_residues
het_resx = [" ".join([r.split(" from ")[1].replace("s","")[-9:]]+["("+r.split(" from ")[0].replace(" ","")+")" if "-" in r.split(" from ")[0] else "( "+r.split(" from ")[0].replace(" ","")+")" ]) if float(re.findall("[0-9]+[.][0-9]+",r.split(" from ")[0])[0])<5.0 else "None" for r in rr.split(";")]
if all(item == "None" for item in het_resx) == True: het_res = "None"
else: het_res = ";".join(het_resx)
else: het_res = "None"
else: het_res = "None"
return het_res
#BINDING_INTERFACES_ASSAM
def save_output_binding_interfaces_assam():
| pdb_dict=collections.defaultdict(list)
pdb_new_dict=collections.defaultdict(list)
pdbs_all=[["_".join(i.split("/")[-1].split("_")[:3]).upper()]+[i.split("/")[-1].replace(".pdb","")] for i in glob.glob("renew_bs_finalized/*.pdb")]
for pdb in pdbs_all: pdb_dict[pdb[0]].append(pdb[1])
for k in pdb_dict.keys():
for index, pdb in enumerate(pdb_dict[k]):
pdb_new_dict[pdb].append(k+"_"+str(index))
csv_writer=csv.writer(open("BINDING_INTERFACES_ASSAM.csv","w"),delimiter=",")
all_cs=[cs for cs in glob.glob("/Users/nursyatila/NSAG_PART2/ASS_EXE/drreposer_output/*/*/*_sum_2.csv")]
for cs in all_cs:
csv_readerx=[pdb_new_dict[row[0]]+row[1:] for row in csv.reader(open(cs,"r"),delimiter=",")]
#rowsx=[pdb_new_dict[row[0]]+row[1:6]+[";".join([change_het_res(hetatm_residues) for hetatm_residues in row[6].split(";")])]+row[7:] for row in csv_readerx]
for r in csv_readerx:
csv_writer.writerow(r) | identifier_body |
|
town.py | ]]
if type(weapon) == list:
if 'Archbishop' == player.cls:
weapon = weapon[0]
else:
weapon = weapon[1]
print("Give me a moment and I will make you an ultimate weapon...")
time.sleep(5)
print("I present to you, {}, the mighty {}!".format(player.name, weapon().name))
player.modify_inventory(weapon, num=1)
del player.inventory['UNOBTAINIUM']
time.sleep(2)
def blacksmith(player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "Welcome to Griswold's! What can I do you for?"
if 'UNOBTAINIUM' in list(player.inventory.keys()) and player.pro_level == 3:
ultimate(player)
buy_list = [('Weapon', 0), ('OffHand', 1)]
shop(player, buy_list, shop_text)
def armory(player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "I have the finest armors for sale. Come in and look around."
buy_list = [('Armor', 0)]
shop(player, buy_list, shop_text)
def alchemist(player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "Welcome to Ye Olde Item Shoppe."
buy_list = [('Potion', 0), ('Misc', 1)]
shop(player, buy_list, shop_text)
def jeweler(player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "Come glimpse the finest jewelry in the land."
buy_list = [('Accessory', 0)]
shop(player, buy_list, shop_text)
def tavern(player):
"""
Quests
"""
print("Sorry but we are closed for construction. Come back once we are open!")
time.sleep(1)
def church(player, wmap):
os.system('cls' if os.name == 'nt' else 'clear')
while True:
print("Come in my child. You are always welcome in the arms of Elysia.")
print("How can we be of service?")
church_options = [('Promotion', 0), ('Save', 1), ('Quit', 2), ('Leave', 3)]
church_index = storyline.get_response(church_options)
if church_options[church_index][1] == 0:
if player.level // 20 > 0 and player.pro_level < 3:
print("You have qualified for a promotion. Which path would you like to follow?")
classes.promotion(player)
print("Let the light of Elysia guide you on your new path.")
elif player.pro_level == 3:
print("You are at max promotion level and can no longer be promoted.")
else:
print("You need to be at least level 20 before you can promote your character.")
time.sleep(1)
elif church_options[church_index][1] == 1:
player.save(wmap) # Can only save at church in town
elif church_options[church_index][1] == 2:
player.game_quit()
elif church_options[church_index][1] == 3:
print("Let the light of Elysia guide you.")
time.sleep(1)
break
os.system('cls' if os.name == 'nt' else 'clear')
def secret_shop(player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "You have found me in this god forsaken place. Since you're here, you might as well buy some supplies."
buy_list = [('Weapon', 0), ('OffHand', 1), ('Armor', 2), ('Accessory', 3), ('Potion', 4)]
shop(player, buy_list, shop_text, in_town=False)
player.location_y += 1
def shop(player, buy_list, shop_text, in_town=True):
items_dict = items.items_dict
while True:
print(shop_text)
print("You have {} gold.".format(player.gold))
print("Did you want to buy or sell?")
option_list = [('Buy', 0), ('Sell', 1), ('Leave', 2)]
opt_index = storyline.get_response(option_list)
if option_list[opt_index][0] == 'Leave':
print("We're sorry to see you go. Come back anytime!")
time.sleep(1)
break
elif option_list[opt_index][0] == 'Buy':
print("Great! What would you like to buy?")
if len(buy_list) > 1:
buy_index = storyline.get_response(buy_list)
else:
buy_index = 0
cat_list = []
i = 0
for cat in items_dict[buy_list[buy_index][0]]:
cat_list.append((cat, i))
i += 1
cat_list.append(('Go back', i))
cat_index = storyline.get_response(cat_list)
if cat_list[cat_index][0] == 'Go back':
continue
item_list = []
item_options = []
i = 0
for item in items_dict[buy_list[buy_index][0]][cat_list[cat_index][0]]:
adj_cost = max(1, int(item().value - player.charisma * 2))
if in_town:
if item().rarity < 35:
item_options.append((item().name + ' ' + str(adj_cost), i))
item_list.append(item)
i += 1
else:
if 35 <= item().rarity <= 40:
item_options.append((item().name + ' ' + str(adj_cost), i))
item_list.append(item)
i += 1
item_options.append(('Go back', i))
item_index = storyline.get_response(item_options)
if item_options[item_index][0] == 'Go back':
continue
buy_item = item_list[item_index]
buy_price = max(1, int(buy_item().value - (player.charisma * 2)))
if player.gold < buy_price:
print("You do not have enough gold.")
time.sleep(0.25)
else:
print("You have {} gold coins.".format(player.gold))
while True:
try:
num = int(input("How many {}s would you like to buy? ".format(buy_item().name)))
if num * buy_price > player.gold:
print("You do not have enough money for that purchase.")
elif num == 0:
break
else:
buy_price *= num
print("That will cost {} gold coins.".format(buy_price))
confirm = input("Do you still want to buy {} {}s? ".format(num, buy_item().name)).lower()
if confirm == 'y':
player.gold -= buy_price
player.modify_inventory(buy_item, num=num, sell=False)
print("{} {} will be added to your inventory.".format(num, buy_item().name))
else:
print("Sorry to hear that. Come back when you have something you wish to buy.")
break
except ValueError:
print("Please enter a valid number.")
input()
elif option_list[opt_index][0] == 'Sell':
print("We could always use more product. What do you have to sell?")
sell_list = []
i = 0
for key in player.inventory.keys():
if player.inventory[key][0]().rarity < 99:
sell_list.append((key, i))
i += 1
if len(sell_list) == 0:
print("You don't have anything to sell.")
break
sell_list.append(('Exit', i))
typ_index = storyline.get_response(sell_list)
if sell_list[typ_index][0] == 'Exit':
break
else:
sell_item = player.inventory[sell_list[typ_index][0]][0]
sell_amt = player.inventory[sell_list[typ_index][0]][1]
if sell_item().rarity >= 50:
print("Wow, that's something you don't see everyday!")
print("You have {} {} to sell.".format(sell_amt, sell_item().name))
while True:
try:
num = int(input("How many would you like to sell? "))
if num <= sell_amt and num != 0:
sale_price = int(0.5 * num * sell_item().value + (player.charisma * 2))
print("I'll give you {} gold coins for that.".format(sale_price))
confirm = input("Do you still want to sell? ").lower()
if confirm == 'y':
player.modify_inventory(player.inventory[sell_list[typ_index][0]][0], num=num,
sell=True)
player.gold += sale_price
print("You sold {} {} for {} gold.".format(num, sell_item().name, sale_price))
else:
print("I am sorry to hear that. Come back when you have something you wish to sell.")
break
elif num == 0:
break
else:
print("You cannot sell more than you have.")
except ValueError:
print("Please enter a valid number.")
input()
else:
| print("Please enter a valid option.") | conditional_block |
|
town.py | buy_list, shop_text)
def alchemist(player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "Welcome to Ye Olde Item Shoppe."
buy_list = [('Potion', 0), ('Misc', 1)]
shop(player, buy_list, shop_text)
def jeweler(player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "Come glimpse the finest jewelry in the land."
buy_list = [('Accessory', 0)]
shop(player, buy_list, shop_text)
def tavern(player):
"""
Quests
"""
print("Sorry but we are closed for construction. Come back once we are open!")
time.sleep(1)
def church(player, wmap):
os.system('cls' if os.name == 'nt' else 'clear')
while True:
print("Come in my child. You are always welcome in the arms of Elysia.")
print("How can we be of service?")
church_options = [('Promotion', 0), ('Save', 1), ('Quit', 2), ('Leave', 3)]
church_index = storyline.get_response(church_options)
if church_options[church_index][1] == 0:
if player.level // 20 > 0 and player.pro_level < 3:
print("You have qualified for a promotion. Which path would you like to follow?")
classes.promotion(player)
print("Let the light of Elysia guide you on your new path.")
elif player.pro_level == 3:
print("You are at max promotion level and can no longer be promoted.")
else:
print("You need to be at least level 20 before you can promote your character.")
time.sleep(1)
elif church_options[church_index][1] == 1:
player.save(wmap) # Can only save at church in town
elif church_options[church_index][1] == 2:
player.game_quit()
elif church_options[church_index][1] == 3:
print("Let the light of Elysia guide you.")
time.sleep(1)
break
os.system('cls' if os.name == 'nt' else 'clear')
def secret_shop(player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "You have found me in this god forsaken place. Since you're here, you might as well buy some supplies."
buy_list = [('Weapon', 0), ('OffHand', 1), ('Armor', 2), ('Accessory', 3), ('Potion', 4)]
shop(player, buy_list, shop_text, in_town=False)
player.location_y += 1
def shop(player, buy_list, shop_text, in_town=True):
items_dict = items.items_dict
while True:
print(shop_text)
print("You have {} gold.".format(player.gold))
print("Did you want to buy or sell?")
option_list = [('Buy', 0), ('Sell', 1), ('Leave', 2)]
opt_index = storyline.get_response(option_list)
if option_list[opt_index][0] == 'Leave':
print("We're sorry to see you go. Come back anytime!")
time.sleep(1)
break
elif option_list[opt_index][0] == 'Buy':
print("Great! What would you like to buy?")
if len(buy_list) > 1:
buy_index = storyline.get_response(buy_list)
else:
buy_index = 0
cat_list = []
i = 0
for cat in items_dict[buy_list[buy_index][0]]:
cat_list.append((cat, i))
i += 1
cat_list.append(('Go back', i))
cat_index = storyline.get_response(cat_list)
if cat_list[cat_index][0] == 'Go back':
continue
item_list = []
item_options = []
i = 0
for item in items_dict[buy_list[buy_index][0]][cat_list[cat_index][0]]:
adj_cost = max(1, int(item().value - player.charisma * 2))
if in_town:
if item().rarity < 35:
item_options.append((item().name + ' ' + str(adj_cost), i))
item_list.append(item)
i += 1
else:
if 35 <= item().rarity <= 40:
item_options.append((item().name + ' ' + str(adj_cost), i))
item_list.append(item)
i += 1
item_options.append(('Go back', i))
item_index = storyline.get_response(item_options)
if item_options[item_index][0] == 'Go back':
continue
buy_item = item_list[item_index]
buy_price = max(1, int(buy_item().value - (player.charisma * 2)))
if player.gold < buy_price:
print("You do not have enough gold.")
time.sleep(0.25)
else:
print("You have {} gold coins.".format(player.gold))
while True:
try:
num = int(input("How many {}s would you like to buy? ".format(buy_item().name)))
if num * buy_price > player.gold:
print("You do not have enough money for that purchase.")
elif num == 0:
break
else:
buy_price *= num
print("That will cost {} gold coins.".format(buy_price))
confirm = input("Do you still want to buy {} {}s? ".format(num, buy_item().name)).lower()
if confirm == 'y':
player.gold -= buy_price
player.modify_inventory(buy_item, num=num, sell=False)
print("{} {} will be added to your inventory.".format(num, buy_item().name))
else:
print("Sorry to hear that. Come back when you have something you wish to buy.")
break
except ValueError:
print("Please enter a valid number.")
input()
elif option_list[opt_index][0] == 'Sell':
print("We could always use more product. What do you have to sell?")
sell_list = []
i = 0
for key in player.inventory.keys():
if player.inventory[key][0]().rarity < 99:
sell_list.append((key, i))
i += 1
if len(sell_list) == 0:
print("You don't have anything to sell.")
break
sell_list.append(('Exit', i))
typ_index = storyline.get_response(sell_list)
if sell_list[typ_index][0] == 'Exit':
break
else:
sell_item = player.inventory[sell_list[typ_index][0]][0]
sell_amt = player.inventory[sell_list[typ_index][0]][1]
if sell_item().rarity >= 50:
print("Wow, that's something you don't see everyday!")
print("You have {} {} to sell.".format(sell_amt, sell_item().name))
while True:
try:
num = int(input("How many would you like to sell? "))
if num <= sell_amt and num != 0:
sale_price = int(0.5 * num * sell_item().value + (player.charisma * 2))
print("I'll give you {} gold coins for that.".format(sale_price))
confirm = input("Do you still want to sell? ").lower()
if confirm == 'y':
player.modify_inventory(player.inventory[sell_list[typ_index][0]][0], num=num,
sell=True)
player.gold += sale_price
print("You sold {} {} for {} gold.".format(num, sell_item().name, sale_price))
else:
print("I am sorry to hear that. Come back when you have something you wish to sell.")
break
elif num == 0:
break
else:
print("You cannot sell more than you have.")
except ValueError:
print("Please enter a valid number.")
input()
else:
print("Please enter a valid option.")
time.sleep(1)
os.system('cls' if os.name == 'nt' else 'clear')
def town(player, wmap):
os.system('cls' if os.name == 'nt' else 'clear')
locations = [blacksmith, armory, alchemist, jeweler, church, tavern]
town_options = [('Blacksmith', 0), ('Armory', 1), ('Alchemist', 2), ('Jeweler', 3), ('Church', 4), ('Tavern', 5),
('Dungeon', 6), ('Status', 7)]
while True:
print("Welcome to the town of Silvana!")
print("Where would you like to go?")
town_index = storyline.get_response(town_options)
if town_options[town_index][0] == 'Dungeon':
print("You descend into the dungeon.")
time.sleep(1)
player.location_x, player.location_y, player.location_z = (5, 10, 1)
break
elif town_options[town_index][0] == 'Status': | player.status()
elif town_options[town_index][0] == 'Church':
locations[town_index](player, wmap) | random_line_split |
|
town.py | Hammer': items.Skullcrusher,
'Ninja Blades': items.Ninjato}
make_list = []
i = 0
for typ, weapon in weapon_list.items():
if typ == 'Staff':
if 'Archbishop' == player.cls:
weapon = weapon[0]
else:
weapon = weapon[1]
if classes.equip_check(weapon, 'Weapon', player.cls):
make_list.append((typ, i))
i += 1
make_list.append(('Not yet', i))
while True:
print('What type of weapon would you like me to make?')
weapon_ind = storyline.get_response(make_list)
break
if make_list[weapon_ind][0] == 'Not yet':
print("I am sorry to hear that...please come back if you change your mind.")
else:
weapon = weapon_list[make_list[weapon_ind][0]]
if type(weapon) == list:
if 'Archbishop' == player.cls:
weapon = weapon[0]
else:
weapon = weapon[1]
print("Give me a moment and I will make you an ultimate weapon...")
time.sleep(5)
print("I present to you, {}, the mighty {}!".format(player.name, weapon().name))
player.modify_inventory(weapon, num=1)
del player.inventory['UNOBTAINIUM']
time.sleep(2)
def blacksmith(player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "Welcome to Griswold's! What can I do you for?"
if 'UNOBTAINIUM' in list(player.inventory.keys()) and player.pro_level == 3:
ultimate(player)
buy_list = [('Weapon', 0), ('OffHand', 1)]
shop(player, buy_list, shop_text)
def armory(player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "I have the finest armors for sale. Come in and look around."
buy_list = [('Armor', 0)]
shop(player, buy_list, shop_text)
def alchemist(player):
|
def jeweler(player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "Come glimpse the finest jewelry in the land."
buy_list = [('Accessory', 0)]
shop(player, buy_list, shop_text)
def tavern(player):
"""
Quests
"""
print("Sorry but we are closed for construction. Come back once we are open!")
time.sleep(1)
def church(player, wmap):
os.system('cls' if os.name == 'nt' else 'clear')
while True:
print("Come in my child. You are always welcome in the arms of Elysia.")
print("How can we be of service?")
church_options = [('Promotion', 0), ('Save', 1), ('Quit', 2), ('Leave', 3)]
church_index = storyline.get_response(church_options)
if church_options[church_index][1] == 0:
if player.level // 20 > 0 and player.pro_level < 3:
print("You have qualified for a promotion. Which path would you like to follow?")
classes.promotion(player)
print("Let the light of Elysia guide you on your new path.")
elif player.pro_level == 3:
print("You are at max promotion level and can no longer be promoted.")
else:
print("You need to be at least level 20 before you can promote your character.")
time.sleep(1)
elif church_options[church_index][1] == 1:
player.save(wmap) # Can only save at church in town
elif church_options[church_index][1] == 2:
player.game_quit()
elif church_options[church_index][1] == 3:
print("Let the light of Elysia guide you.")
time.sleep(1)
break
os.system('cls' if os.name == 'nt' else 'clear')
def secret_shop(player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "You have found me in this god forsaken place. Since you're here, you might as well buy some supplies."
buy_list = [('Weapon', 0), ('OffHand', 1), ('Armor', 2), ('Accessory', 3), ('Potion', 4)]
shop(player, buy_list, shop_text, in_town=False)
player.location_y += 1
def shop(player, buy_list, shop_text, in_town=True):
items_dict = items.items_dict
while True:
print(shop_text)
print("You have {} gold.".format(player.gold))
print("Did you want to buy or sell?")
option_list = [('Buy', 0), ('Sell', 1), ('Leave', 2)]
opt_index = storyline.get_response(option_list)
if option_list[opt_index][0] == 'Leave':
print("We're sorry to see you go. Come back anytime!")
time.sleep(1)
break
elif option_list[opt_index][0] == 'Buy':
print("Great! What would you like to buy?")
if len(buy_list) > 1:
buy_index = storyline.get_response(buy_list)
else:
buy_index = 0
cat_list = []
i = 0
for cat in items_dict[buy_list[buy_index][0]]:
cat_list.append((cat, i))
i += 1
cat_list.append(('Go back', i))
cat_index = storyline.get_response(cat_list)
if cat_list[cat_index][0] == 'Go back':
continue
item_list = []
item_options = []
i = 0
for item in items_dict[buy_list[buy_index][0]][cat_list[cat_index][0]]:
adj_cost = max(1, int(item().value - player.charisma * 2))
if in_town:
if item().rarity < 35:
item_options.append((item().name + ' ' + str(adj_cost), i))
item_list.append(item)
i += 1
else:
if 35 <= item().rarity <= 40:
item_options.append((item().name + ' ' + str(adj_cost), i))
item_list.append(item)
i += 1
item_options.append(('Go back', i))
item_index = storyline.get_response(item_options)
if item_options[item_index][0] == 'Go back':
continue
buy_item = item_list[item_index]
buy_price = max(1, int(buy_item().value - (player.charisma * 2)))
if player.gold < buy_price:
print("You do not have enough gold.")
time.sleep(0.25)
else:
print("You have {} gold coins.".format(player.gold))
while True:
try:
num = int(input("How many {}s would you like to buy? ".format(buy_item().name)))
if num * buy_price > player.gold:
print("You do not have enough money for that purchase.")
elif num == 0:
break
else:
buy_price *= num
print("That will cost {} gold coins.".format(buy_price))
confirm = input("Do you still want to buy {} {}s? ".format(num, buy_item().name)).lower()
if confirm == 'y':
player.gold -= buy_price
player.modify_inventory(buy_item, num=num, sell=False)
print("{} {} will be added to your inventory.".format(num, buy_item().name))
else:
print("Sorry to hear that. Come back when you have something you wish to buy.")
break
except ValueError:
print("Please enter a valid number.")
input()
elif option_list[opt_index][0] == 'Sell':
print("We could always use more product. What do you have to sell?")
sell_list = []
i = 0
for key in player.inventory.keys():
if player.inventory[key][0]().rarity < 99:
sell_list.append((key, i))
i += 1
if len(sell_list) == 0:
print("You don't have anything to sell.")
break
sell_list.append(('Exit', i))
typ_index = storyline.get_response(sell_list)
if sell_list[typ_index][0] == 'Exit':
break
else:
sell_item = player.inventory[sell_list[typ_index][0]][0]
sell_amt = player.inventory[sell_list[typ_index][0]][1]
if sell_item().rarity >= 50:
print("Wow, that's something you don't see everyday!")
print("You have {} {} to sell.".format(sell_amt, sell_item().name))
while True:
try:
num = int(input("How many would you like to sell? "))
if num <= sell_amt and num != | os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "Welcome to Ye Olde Item Shoppe."
buy_list = [('Potion', 0), ('Misc', 1)]
shop(player, buy_list, shop_text) | identifier_body |
town.py | Hammer': items.Skullcrusher,
'Ninja Blades': items.Ninjato}
make_list = []
i = 0
for typ, weapon in weapon_list.items():
if typ == 'Staff':
if 'Archbishop' == player.cls:
weapon = weapon[0]
else:
weapon = weapon[1]
if classes.equip_check(weapon, 'Weapon', player.cls):
make_list.append((typ, i))
i += 1
make_list.append(('Not yet', i))
while True:
print('What type of weapon would you like me to make?')
weapon_ind = storyline.get_response(make_list)
break
if make_list[weapon_ind][0] == 'Not yet':
print("I am sorry to hear that...please come back if you change your mind.")
else:
weapon = weapon_list[make_list[weapon_ind][0]]
if type(weapon) == list:
if 'Archbishop' == player.cls:
weapon = weapon[0]
else:
weapon = weapon[1]
print("Give me a moment and I will make you an ultimate weapon...")
time.sleep(5)
print("I present to you, {}, the mighty {}!".format(player.name, weapon().name))
player.modify_inventory(weapon, num=1)
del player.inventory['UNOBTAINIUM']
time.sleep(2)
def blacksmith(player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "Welcome to Griswold's! What can I do you for?"
if 'UNOBTAINIUM' in list(player.inventory.keys()) and player.pro_level == 3:
ultimate(player)
buy_list = [('Weapon', 0), ('OffHand', 1)]
shop(player, buy_list, shop_text)
def | (player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "I have the finest armors for sale. Come in and look around."
buy_list = [('Armor', 0)]
shop(player, buy_list, shop_text)
def alchemist(player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "Welcome to Ye Olde Item Shoppe."
buy_list = [('Potion', 0), ('Misc', 1)]
shop(player, buy_list, shop_text)
def jeweler(player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "Come glimpse the finest jewelry in the land."
buy_list = [('Accessory', 0)]
shop(player, buy_list, shop_text)
def tavern(player):
"""
Quests
"""
print("Sorry but we are closed for construction. Come back once we are open!")
time.sleep(1)
def church(player, wmap):
os.system('cls' if os.name == 'nt' else 'clear')
while True:
print("Come in my child. You are always welcome in the arms of Elysia.")
print("How can we be of service?")
church_options = [('Promotion', 0), ('Save', 1), ('Quit', 2), ('Leave', 3)]
church_index = storyline.get_response(church_options)
if church_options[church_index][1] == 0:
if player.level // 20 > 0 and player.pro_level < 3:
print("You have qualified for a promotion. Which path would you like to follow?")
classes.promotion(player)
print("Let the light of Elysia guide you on your new path.")
elif player.pro_level == 3:
print("You are at max promotion level and can no longer be promoted.")
else:
print("You need to be at least level 20 before you can promote your character.")
time.sleep(1)
elif church_options[church_index][1] == 1:
player.save(wmap) # Can only save at church in town
elif church_options[church_index][1] == 2:
player.game_quit()
elif church_options[church_index][1] == 3:
print("Let the light of Elysia guide you.")
time.sleep(1)
break
os.system('cls' if os.name == 'nt' else 'clear')
def secret_shop(player):
os.system('cls' if os.name == 'nt' else 'clear')
shop_text = "You have found me in this god forsaken place. Since you're here, you might as well buy some supplies."
buy_list = [('Weapon', 0), ('OffHand', 1), ('Armor', 2), ('Accessory', 3), ('Potion', 4)]
shop(player, buy_list, shop_text, in_town=False)
player.location_y += 1
def shop(player, buy_list, shop_text, in_town=True):
items_dict = items.items_dict
while True:
print(shop_text)
print("You have {} gold.".format(player.gold))
print("Did you want to buy or sell?")
option_list = [('Buy', 0), ('Sell', 1), ('Leave', 2)]
opt_index = storyline.get_response(option_list)
if option_list[opt_index][0] == 'Leave':
print("We're sorry to see you go. Come back anytime!")
time.sleep(1)
break
elif option_list[opt_index][0] == 'Buy':
print("Great! What would you like to buy?")
if len(buy_list) > 1:
buy_index = storyline.get_response(buy_list)
else:
buy_index = 0
cat_list = []
i = 0
for cat in items_dict[buy_list[buy_index][0]]:
cat_list.append((cat, i))
i += 1
cat_list.append(('Go back', i))
cat_index = storyline.get_response(cat_list)
if cat_list[cat_index][0] == 'Go back':
continue
item_list = []
item_options = []
i = 0
for item in items_dict[buy_list[buy_index][0]][cat_list[cat_index][0]]:
adj_cost = max(1, int(item().value - player.charisma * 2))
if in_town:
if item().rarity < 35:
item_options.append((item().name + ' ' + str(adj_cost), i))
item_list.append(item)
i += 1
else:
if 35 <= item().rarity <= 40:
item_options.append((item().name + ' ' + str(adj_cost), i))
item_list.append(item)
i += 1
item_options.append(('Go back', i))
item_index = storyline.get_response(item_options)
if item_options[item_index][0] == 'Go back':
continue
buy_item = item_list[item_index]
buy_price = max(1, int(buy_item().value - (player.charisma * 2)))
if player.gold < buy_price:
print("You do not have enough gold.")
time.sleep(0.25)
else:
print("You have {} gold coins.".format(player.gold))
while True:
try:
num = int(input("How many {}s would you like to buy? ".format(buy_item().name)))
if num * buy_price > player.gold:
print("You do not have enough money for that purchase.")
elif num == 0:
break
else:
buy_price *= num
print("That will cost {} gold coins.".format(buy_price))
confirm = input("Do you still want to buy {} {}s? ".format(num, buy_item().name)).lower()
if confirm == 'y':
player.gold -= buy_price
player.modify_inventory(buy_item, num=num, sell=False)
print("{} {} will be added to your inventory.".format(num, buy_item().name))
else:
print("Sorry to hear that. Come back when you have something you wish to buy.")
break
except ValueError:
print("Please enter a valid number.")
input()
elif option_list[opt_index][0] == 'Sell':
print("We could always use more product. What do you have to sell?")
sell_list = []
i = 0
for key in player.inventory.keys():
if player.inventory[key][0]().rarity < 99:
sell_list.append((key, i))
i += 1
if len(sell_list) == 0:
print("You don't have anything to sell.")
break
sell_list.append(('Exit', i))
typ_index = storyline.get_response(sell_list)
if sell_list[typ_index][0] == 'Exit':
break
else:
sell_item = player.inventory[sell_list[typ_index][0]][0]
sell_amt = player.inventory[sell_list[typ_index][0]][1]
if sell_item().rarity >= 50:
print("Wow, that's something you don't see everyday!")
print("You have {} {} to sell.".format(sell_amt, sell_item().name))
while True:
try:
num = int(input("How many would you like to sell? "))
if num <= sell_amt and num != | armory | identifier_name |
impl_encryption.rs | Some("0C000000789CCB48CDC9C95728CF2F32303402001D8004202E"),
Some(12),
),
(Some("020000000000"), Some(2)),
(Some("0000000001"), Some(0)),
(
Some("02000000789CCB48CDC9C95728CF2FCA4901001A0B045D"),
Some(2),
),
(Some("010203"), Some(0)),
(Some("01020304"), Some(0)),
(None, None),
];
for (s, exp) in cases {
let s = s.map(|inner| hex::decode(inner.as_bytes().to_vec()).unwrap());
let output = RpnFnScalarEvaluator::new()
.push_param(s)
.evaluate(ScalarFuncSig::UncompressedLength)
.unwrap();
assert_eq!(output, exp);
}
}
#[test]
fn test_sha2() {
let cases = vec![
("pingcap", 0, "2871823be240f8ecd1d72f24c99eaa2e58af18b4b8ba99a4fc2823ba5c43930a"),
("pingcap", 224, "cd036dc9bec69e758401379c522454ea24a6327b48724b449b40c6b7"),
("pingcap", 256, "2871823be240f8ecd1d72f24c99eaa2e58af18b4b8ba99a4fc2823ba5c43930a"),
("pingcap", 384, "c50955b6b0c7b9919740d956849eedcb0f0f90bf8a34e8c1f4e071e3773f53bd6f8f16c04425ff728bed04de1b63db51"),
("pingcap", 512, "ea903c574370774c4844a83b7122105a106e04211673810e1baae7c2ae7aba2cf07465e02f6c413126111ef74a417232683ce7ba210052e63c15fc82204aad80"),
("13572468", 0, "1c91ab1c162fd0cae60a5bb9880f3e7d5a133a65b6057a644b26973d9c55dcfe"),
("13572468", 224, "8ad67735bbf49576219f364f4640d595357a440358d15bf6815a16e4"),
("13572468", 256, "1c91ab1c162fd0cae60a5bb9880f3e7d5a133a65b6057a644b26973d9c55dcfe"),
("13572468.123", 384, "3b4ee302435dc1e15251efd9f3982b1ca6fe4ac778d3260b7bbf3bea613849677eda830239420e448e4c6dc7c2649d89"),
("13572468.123", 512, "4820aa3f2760836557dc1f2d44a0ba7596333fdb60c8a1909481862f4ab0921c00abb23d57b7e67a970363cc3fcb78b25b6a0d45cdcac0e87aa0c96bc51f7f96"),
];
for (input_str, hash_length_i64, exp_str) in cases {
let exp = Some(Bytes::from(exp_str));
let got = RpnFnScalarEvaluator::new()
.push_param(Some(Bytes::from(input_str)))
.push_param(Some(Int::from(hash_length_i64)))
.evaluate::<Bytes>(ScalarFuncSig::Sha2)
.unwrap();
assert_eq!(got, exp, "sha2('{:?}', {:?})", input_str, hash_length_i64);
}
let null_cases = vec![
(ScalarValue::Bytes(None), ScalarValue::Int(Some(1))),
(
ScalarValue::Bytes(Some(b"13572468".to_vec())),
ScalarValue::Int(None),
),
(ScalarValue::Bytes(None), ScalarValue::Int(None)),
(
ScalarValue::Bytes(Some(b"pingcap".to_vec())),
ScalarValue::Int(Some(-1)),
),
(
ScalarValue::Bytes(Some(b"13572468".to_vec())),
ScalarValue::Int(Some(999)),
),
];
for (input_str, hash_length_i64) in null_cases {
assert!(RpnFnScalarEvaluator::new()
.push_param(input_str)
.push_param(hash_length_i64)
.evaluate::<Bytes>(ScalarFuncSig::Sha2)
.unwrap()
.is_none())
}
}
#[test]
fn test_random_bytes() {
let cases = vec![1, 32, 233, 1024];
for len in cases {
let got = RpnFnScalarEvaluator::new()
.push_param(Some(Int::from(len as i64)))
.evaluate::<Bytes>(ScalarFuncSig::RandomBytes)
.unwrap();
assert_eq!(got.unwrap().len(), len);
}
let overflow_tests = vec![
ScalarValue::Int(Some(-32)),
ScalarValue::Int(Some(1025)),
ScalarValue::Int(Some(0)),
];
for len in overflow_tests {
assert!(RpnFnScalarEvaluator::new()
.push_param(len)
.evaluate::<Bytes>(ScalarFuncSig::RandomBytes)
.is_err(),);
}
//test NULL case
assert!(RpnFnScalarEvaluator::new()
.push_param(ScalarValue::Int(None))
.evaluate::<Bytes>(ScalarFuncSig::RandomBytes)
.unwrap()
.is_none())
}
#[test]
fn test_password() {
let cases = vec![
("Ti | KV", "*cca644408381f962dba8dfb9889db1371ee74208"),
("Pingcap", "*f33bc75eac70ac317621fbbfa560d6251c43cf8a"),
("rust", "*090c2b08e0c1776910e777b917c2185be6554c2e"),
("database", "*02e86b4af5219d0ba6c974908aea62d42eb7da24"),
("raft", "*b23a77787ed44e62ef2570f03ce8982d119fb699"),
];
for (input, output) in cases {
let res = RpnFnScalarEvaluator::new()
.push_param(Some(Bytes::from(input)))
.evaluate::<Bytes>(ScalarFuncSig::Password)
.unwrap();
assert_eq!(res, Some(Bytes::from(output)))
}
// test for null
let res = RpnFnScalarEvaluator::new()
.push_param(ScalarValue::Bytes(None))
.evaluate::<Bytes>(ScalarFuncSig::Password)
.unwrap(); | identifier_body |
|
impl_encryption.rs | Result<Bytes> {
hash::hash(hashtype, input)
.map(|digest| hex::encode(digest).into_bytes())
.map_err(|e| box_err!("OpenSSL error: {:?}", e))
}
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn uncompressed_length(ctx: &mut EvalContext, arg: Option<BytesRef>) -> Result<Option<Int>> {
use byteorder::{ByteOrder, LittleEndian};
Ok(arg.as_ref().map(|s| {
if s.is_empty() {
0
} else if s.len() <= 4 {
ctx.warnings.append_warning(Error::zlib_data_corrupted());
0
} else {
Int::from(LittleEndian::read_u32(&s[0..4]))
}
}))
}
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn random_bytes(_ctx: &mut EvalContext, arg: Option<&Int>) -> Result<Option<Bytes>> {
match arg {
Some(arg) => {
if *arg < 1 || *arg > MAX_RAND_BYTES_LENGTH {
return Err(Error::overflow("length", "random_bytes").into());
}
Ok(Some(gen_random_bytes(*arg as usize)))
}
_ => Ok(None),
}
}
#[cfg(test)]
mod tests {
use tipb::ScalarFuncSig;
use super::*;
use crate::types::test_util::RpnFnScalarEvaluator;
fn test_unary_func_ok_none<'a, I: EvaluableRef<'a>, O: EvaluableRet>(sig: ScalarFuncSig)
where
O: PartialEq,
Option<I>: Into<ScalarValue>,
Option<O>: From<ScalarValue>,
{
assert_eq!(
None,
RpnFnScalarEvaluator::new()
.push_param(Option::<I>::None)
.evaluate::<O>(sig)
.unwrap()
);
}
#[test]
fn test_md5() {
let test_cases = vec![ | (b"abc".to_vec(), "900150983cd24fb0d6963f7d28e17f72"),
(b"123".to_vec(), "202cb962ac59075b964b07152d234b70"),
(
"你好".as_bytes().to_vec(),
"7eca689f0d3389d9dea66ae112e5cfd7",
),
(
"分布式データベース".as_bytes().to_vec(),
"63c0354797bd261e2cbf8581147eeeda",
),
(vec![0xc0, 0x80], "b26555f33aedac7b2684438cc5d4d05e"),
(vec![0xED, 0xA0, 0x80], "546d3dc8de10fbf8b448f678a47901e4"),
];
for (arg, expect_output) in test_cases {
let expect_output = Some(Bytes::from(expect_output));
let output = RpnFnScalarEvaluator::new()
.push_param(arg)
.evaluate::<Bytes>(ScalarFuncSig::Md5)
.unwrap();
assert_eq!(output, expect_output);
}
test_unary_func_ok_none::<BytesRef, Bytes>(ScalarFuncSig::Md5);
}
#[test]
fn test_sha1() {
let test_cases = vec![
(vec![], "da39a3ee5e6b4b0d3255bfef95601890afd80709"),
(b"a".to_vec(), "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8"),
(b"ab".to_vec(), "da23614e02469a0d7c7bd1bdab5c9c474b1904dc"),
(b"abc".to_vec(), "a9993e364706816aba3e25717850c26c9cd0d89d"),
(b"123".to_vec(), "40bd001563085fc35165329ea1ff5c5ecbdbbeef"),
(
"你好".as_bytes().to_vec(),
"440ee0853ad1e99f962b63e459ef992d7c211722",
),
(
"分布式データベース".as_bytes().to_vec(),
"82aa64080df2ca37550ddfc3419d75ac1df3e0d0",
),
(vec![0xc0, 0x80], "8bf4822782a21d7ac68ece130ac36987548003bd"),
(
vec![0xED, 0xA0, 0x80],
"10db70ec072d000c68dd95879f9b831e43a859fd",
),
];
for (arg, expect_output) in test_cases {
let expect_output = Some(Bytes::from(expect_output));
let output = RpnFnScalarEvaluator::new()
.push_param(arg)
.evaluate::<Bytes>(ScalarFuncSig::Sha1)
.unwrap();
assert_eq!(output, expect_output);
}
test_unary_func_ok_none::<BytesRef, Bytes>(ScalarFuncSig::Sha1);
}
#[test]
fn test_uncompressed_length() {
let cases = vec![
(Some(""), Some(0)),
(
Some("0B000000789CCB48CDC9C95728CF2FCA4901001A0B045D"),
Some(11),
),
(
Some("0C000000789CCB48CDC9C95728CF2F32303402001D8004202E"),
Some(12),
),
(Some("020000000000"), Some(2)),
(Some("0000000001"), Some(0)),
(
Some("02000000789CCB48CDC9C95728CF2FCA4901001A0B045D"),
Some(2),
),
(Some("010203"), Some(0)),
(Some("01020304"), Some(0)),
(None, None),
];
for (s, exp) in cases {
let s = s.map(|inner| hex::decode(inner.as_bytes().to_vec()).unwrap());
let output = RpnFnScalarEvaluator::new()
.push_param(s)
.evaluate(ScalarFuncSig::UncompressedLength)
.unwrap();
assert_eq!(output, exp);
}
}
#[test]
fn test_sha2() {
let cases = vec![
("pingcap", 0, "2871823be240f8ecd1d72f24c99eaa2e58af18b4b8ba99a4fc2823ba5c43930a"),
("pingcap", 224, "cd036dc9bec69e758401379c522454ea24a6327b48724b449b40c6b7"),
("pingcap", 256, "2871823be240f8ecd1d72f24c99eaa2e58af18b4b8ba99a4fc2823ba5c43930a"),
("pingcap", 384, "c50955b6b0c7 | (vec![], "d41d8cd98f00b204e9800998ecf8427e"),
(b"a".to_vec(), "0cc175b9c0f1b6a831c399e269772661"),
(b"ab".to_vec(), "187ef4436122d1cc2f40dc2b92f0eba0"), | random_line_split |
impl_encryption.rs | <Bytes> {
hash::hash(hashtype, input)
.map(|digest| hex::encode(digest).into_bytes())
.map_err(|e| box_err!("OpenSSL error: {:?}", e))
}
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn uncompressed_length(ctx: &mut EvalContext, arg: Option<BytesRef>) -> Result<Option<Int>> {
use byteorder::{ByteOrder, LittleEndian};
Ok(arg.as_ref().map(|s| {
if s.is_empty() {
0
} else if s.len() <= 4 {
ctx.warnings.append_warning(Error::zlib_data_corrupted());
0
} else {
Int::from(LittleEndian::read_u32(&s[0..4]))
}
}))
}
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn random_bytes(_ctx: &mut EvalContext, arg: Option<&Int>) -> Result<Option<Bytes>> {
match arg {
Some(arg) => {
if *arg < 1 || *arg > MAX_RAND_BYTES_LENGTH {
return Err(Error::overflow("length", "random_bytes").into());
}
Ok(Some(gen_random_bytes(*arg as usize)))
}
_ => Ok(None),
}
}
#[cfg(test)]
mod tests {
use tipb::ScalarFuncSig;
use super::*;
use crate::types::test_util::RpnFnScalarEvaluator;
fn test_unary_func_ok_none<'a, I: EvaluableRef<'a>, O: EvaluableRet>(sig: ScalarFuncSig)
where
O: PartialEq,
Option<I>: Into<ScalarValue>,
Option<O>: From<ScalarValue>,
{
assert_eq!(
None,
RpnFnScalarEvaluator::new()
.push_param(Option::<I>::None)
.evaluate::<O>(sig)
.unwrap()
);
}
#[test]
fn test_md5() {
let test_cases = vec![
(vec![], "d41d8cd98f00b204e9800998ecf8427e"),
(b"a".to_vec(), "0cc175b9c0f1b6a831c399e269772661"),
(b"ab".to_vec(), "187ef4436122d1cc2f40dc2b92f0eba0"),
(b"abc".to_vec(), "900150983cd24fb0d6963f7d28e17f72"),
(b"123".to_vec(), "202cb962ac59075b964b07152d234b70"),
(
"你好".as_bytes().to_vec(),
"7eca689f0d3389d9dea66ae112e5cfd7",
),
(
"分布式データベース".as_bytes().to_vec(),
"63c0354797bd261e2cbf8581147eeeda",
),
(vec![0xc0, 0x80], "b26555f33aedac7b2684438cc5d4d05e"),
(vec![0xED, 0xA0, 0x80], "546d3dc8de10fbf8b448f678a47901e4"),
];
for (arg, expect_output) in test_cases {
let expect_output = Some(Bytes::from(expect_output));
let output = RpnFnScalarEvaluator::new()
.push_param(arg)
.evaluate::<Bytes>(ScalarFuncSig::Md5)
.unwrap();
assert_eq!(output, expect_output);
}
test_unary_func_ok_none::<BytesRef, Bytes>(ScalarFuncSig::Md5);
}
#[test]
fn test_sha1() {
let test_cases = vec![
(vec![], "da39a3ee5e6b4b0d3255bfef95601890afd80709"),
(b"a".to_vec(), "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8"),
(b"ab".to_vec(), "da23614e02469a0d7c7bd1bdab5c9c474b1904dc"),
(b"abc".to_vec(), "a9993e364706816aba3e25717850c26c9cd0d89d"),
(b"123".to_vec(), "40bd001563085fc35165329ea1ff5c5ecbdbbeef"),
(
"你好".as_bytes().to_vec(),
"440ee0853ad1e99f962b63e459ef992d7c211722",
),
(
"分布式データベース".as_bytes().to_vec(),
"82aa64080df2ca37550ddfc3419d75ac1df3e0d0",
),
(vec![0xc0, 0x80], "8bf4822782a21d7ac68ece130ac36987548003bd"),
(
vec![0xED, 0xA0, 0x80],
"10db70ec072d000c68dd95879f9b831e43a859fd",
),
];
for (arg, expect_output) in test_cases {
let expect_output = Some(Bytes::from(expect_output));
let output = RpnFnScalarEvaluator::new()
.push_param(arg)
.evaluate::<Bytes>(ScalarFuncSig::Sha1)
.unwrap();
assert_eq!(output, expect_output);
}
test_unary_func_ok_none::<BytesRef, Bytes>(ScalarFuncSig::Sha1);
}
#[test]
fn test_uncompressed_length() {
let cases = vec![
(Some(""), Some(0)),
(
Some("0B000000789CCB48CDC9C95728CF2FCA4901001A0B045D"),
Some(11),
),
(
Some("0C000000789CCB48CDC9C95728CF2F32303402001D8004202E"),
Some(12),
),
(Some("020000000000"), Some(2)),
(Some("0000000001"), Some(0)),
(
Some("02000000789CCB48CDC9C95728CF2FCA4901001A0B045D"),
Some(2),
),
(Some("010203"), Some(0)),
(Some("01020304"), Some(0)),
(None, None),
];
for (s, exp) in cases {
let s = s.map(|inner| hex::decode(inner.as_bytes().to_vec()).unwrap());
let output = RpnFnScalarEvaluator::new()
.push_param(s)
.evaluate(ScalarFuncSig::UncompressedLength)
.unwrap();
assert_eq!(output, exp);
}
}
#[test]
fn test_sha2() {
let cases = vec![
| "pingcap", 0, "2871823be240f8ecd1d72f24c99eaa2e58af18b4b8ba99a4fc2823ba5c43930a"),
("pingcap", 224, "cd036dc9bec69e758401379c522454ea24a6327b48724b449b40c6b7"),
("pingcap", 256, "2871823be240f8ecd1d72f24c99eaa2e58af18b4b8ba99a4fc2823ba5c43930a"),
("pingcap", 384, "c50955b6b0c | ( | identifier_name |