| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
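Each row pairs a source file's code context, split into a prefix and a suffix, with the held-out middle span and a fim_type label describing how that span was chosen. As a rough illustration of how such a row could be assembled into a fill-in-the-middle training string (the sentinel tokens below are an assumption for illustration only, not something specified by this dataset):

```python
# Hypothetical assembly of one fill-in-the-middle example from a dataset row.
# The <fim_*> sentinel tokens are placeholders; real tokenizers define their own.
def build_fim_example(prefix: str, suffix: str, middle: str) -> str:
    return f"<fim_prefix>{prefix}<fim_suffix>{suffix}<fim_middle>{middle}"

row = {
    "file_name": "script.js",
    "prefix": "var width = ",
    "suffix": ";\n",
    "middle": "960",
    "fim_type": "random_line_split",
}
print(build_fim_example(row["prefix"], row["suffix"], row["middle"]))
```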
script.js | 122.463701, 37.747683]) //-122.41, 37.79
.scale(width*200)
.translate([width / 2, height / 2]);
var path = d3.geo.path()
.projection(projection);
var ndx, yearDim, mhiDim, mgrDim, tractDim;
var filterYear, filterMetric, sf_topojson;
var parsed_data = [];
var parsed_biz = [];
var parsed_biz_tract = [];
var divlegend = document.getElementById("legend");
var maptitle = document.getElementById("yolo");
var filterMetric_mapping = {"mhi": "Mean Household Income", "mgr": "Median Gross Rent"};
var format = d3.format("$.2s");
function clickedOn(evt) {
var oldFilterMetric = filterMetric;
filterMetric = evt.target.id;
if (filterMetric !== "mhi" && filterMetric !== "mgr") {
filterYear = +(filterMetric.split("_")[1]);
filterMetric = oldFilterMetric;
metricChange(true);
var titleMetric = maptitle.innerHTML.split(" ").slice(0, 3);
titleMetric.push(filterYear);
maptitle.innerHTML = titleMetric.join(" ");
} else {
metricChange(false);
var titleMetric = maptitle.innerHTML.split(" ").slice(3, 4);
var new_metric = full_title[filterMetric].split(" ").reverse();
new_metric.forEach(function(d) {
titleMetric.unshift(d);
});
maptitle.innerHTML = titleMetric.join(" ");
}
}
divlegend.addEventListener("click", clickedOn, false);
function metricChange(yearChange) {
var specColors = color(filterMetric);
var specDomains = domains[filterMetric];
map.valueAccessor( function(d) {
if (filterMetric === "mhi") {
return d.value.mhi;
} else if (filterMetric === "pov") {
return d.value.pov;
} else { //default on mgr
return d.value.mgr;
}
})
.colors(d3.scale.threshold().domain(specDomains).range(specColors));
if (yearChange) {
var new_year_group = metricYearChange_map();
map.group(new_year_group);
plot_biz(yearChange);
}
// var new_metric_group = metricChange_timeline();
// timeline.group(new_metric_group)
// .yAxisLabel(function(d) {
// if (filterMetric === "mhi") {
// return "Mean Household Income";
// } else if (filterMetric === "pov") {
// return d.value.pov;
// } else { //default on mgr
// return "Median Gross Rent";
// }});
dc.redrawAll();
}
queue()
.defer(d3.json, "/data/sf_tracts.json")
.defer(d3.csv, "/data/year_ordered_acs_inc_rent_biz.csv")
.defer(d3.csv, "/data/groc_liq.csv")
.await(ready);
var data_real;
function ready(error, tracts, data, biz) | d.end_date = dateFormat.parse(d.end_date);
d.start_date = dateFormat.parse(d.start_date);
d.storetype = d.storetype;
d.lat = +d.lat;
d.lon = +d.lon;
parsed_biz.push({"end_date": d.end_date, "storetype": d.storetype, "lat": d.lat, "lon": d.lon, "start_date": d.start_date});
});
ndx = crossfilter(parsed_data);
yearDim = ndx.dimension(function(d) { return [d.id, +d.year.getFullYear()]; });
tractDim = ndx.dimension(function(d) { return d.id; });
vector = svg.append("g")
.attr("class", "vector")
.call(renderTiles);
d3.select(".loading").remove();
render(filterYear, filterMetric);
plot_biz(false);
}
function sort_group(group, order) {
return {
all: function() {
var g = group.all(), map = {};
g.forEach(function(kv) {
map[kv.key] = kv.value;
});
return order.map(function(k) {
return {key: k, value: map[k]};
});
}
};
};
function render(filterYear, filterMetric) {
// dc.filterAll();
var metric_grouped = metricYearChange_map();
var year_grouped = metricChange_timeline();
var hg_grouped = yearDim.group(function(d) { return d;}).reduceSum(
function(d) {
return d.high_end_groc;
});
var lg_grouped = yearDim.group(function(d) { return d;}).reduceSum(
function(d) {
return d.low_end_groc;
});
var order = parsed_data.map(function(values) {
return [values.id, +values.year.getFullYear()];
});
//
// var hg_grouped = sort_group(hg_grouped, order);
// var lg_grouped = sort_group(lg_grouped, order)
var specColors = color(filterMetric);
var specDomains = domains[filterMetric];
map.projection(projection)
.dimension(tractDim)
.group(metric_grouped)
.valueAccessor(function(d) {
console.log(filterMetric_mapping[filterMetric]);
if (filterMetric === "mhi") {
return d.value.mhi;
} else if (filterMetric === "pov") {
return d.value.pov;
} else { //default on mgr
return d.value.mgr;
}
})
.colorAccessor(function(d) {
return d;
})
.overlayGeoJson(sf_topojson, 'sf', function(d) { return d.id; })
.colors(d3.scale.threshold().domain(specDomains).range(specColors))
.title(function (d) {
return "Census Tract " + d.key + "\n" + filterMetric_mapping[filterMetric] + " : " + format(d.value ? d.value : 0);
})
.transitionDuration(500);
// timeline.width(670)
// .height(300)
// .margins({top: 10, right: 50, bottom: 30, left: 50})
// .dimension(yearDim)
// .group(year_grouped)
// .keyAccessor(function(d) { return +d.key[1]; })
// .valueAccessor(function(d) {
// return +d.value;
// })
// .seriesAccessor(function(d) { return d.key[0]; })
// .x(d3.scale.linear().domain([2000, 2014]))
// .renderHorizontalGridLines(true)
// .xAxisLabel("Year")
// .yAxisLabel(function(d) {
// if (filterMetric === "mhi") {
// return "Mean Household Income";
// } else { //default on mgr
// return "Median Gross Rent";
// }
// })
// .clipPadding(10)
// .elasticY(true)
// .brushOn(false)
// .xAxis().ticks(8).tickFormat(d3.format("d"));
//
// biz_timeline.width(470)
// .height(300)
// .margins({top: 10, right: 50, bottom: 30, left: 30})
// .dimension(yearDim)
// .group(hg_grouped)
// .keyAccessor(function(d) { return +d.key[1]; })
// .valueAccessor(function(d) {
// return +d.value;
// })
// .seriesAccessor(function(d) { return d.key[0]; })
// .x(d3.scale.linear().domain([2000, 2014]))
// .renderHorizontalGridLines(true)
// .xAxisLabel("Year")
// .yAxisLabel("Number of '$$'+ Grocery Stores")
// .clipPadding(10)
// .elasticY(true)
// .brushOn(false)
// .xAxis().ticks(8).tickFormat(d3.format("d"));
//
// biz2_timeline.width(470)
// .height(300)
// .margins({top: 10, right: 50, bottom: 30, left: 30})
// .dimension(yearDim | {
if (error)
throw error;
sf_topojson = topojson.feature(tracts, tracts.objects.sf).features;
filterMetric = "mgr";
filterYear = 2014;
svg.style("stroke", "black").style("fill", "lightgrey").style("stroke-width", "1px");
var dateFormat;
data.forEach(function(d) {
dateFormat = d3.time.format('%Y');
d.year = dateFormat.parse(d.year);
d.mhi = +d.mhi;
d.mgr = +d.mgr;
parsed_data.push({"id": d.id, "year": d.year, "mhi": d.mhi, "mgr": d.mgr, "liq": +d.liq, "high_end_groc": +d.high_end_groc, "low_end_groc": +d.low_end_groc, "groc": +d.groc});
});
biz.forEach(function(d) { | identifier_body |
script.js | return d.value.mgr;
}
})
.colors(d3.scale.threshold().domain(specDomains).range(specColors));
if (yearChange) {
var new_year_group = metricYearChange_map();
map.group(new_year_group);
plot_biz(yearChange);
}
// var new_metric_group = metricChange_timeline();
// timeline.group(new_metric_group)
// .yAxisLabel(function(d) {
// if (filterMetric === "mhi") {
// return "Mean Household Income";
// } else if (filterMetric === "pov") {
// return d.value.pov;
// } else { //default on mgr
// return "Median Gross Rent";
// }});
dc.redrawAll();
}
queue()
.defer(d3.json, "/data/sf_tracts.json")
.defer(d3.csv, "/data/year_ordered_acs_inc_rent_biz.csv")
.defer(d3.csv, "/data/groc_liq.csv")
.await(ready);
var data_real;
function ready(error, tracts, data, biz) {
if (error)
throw error;
sf_topojson = topojson.feature(tracts, tracts.objects.sf).features;
filterMetric = "mgr";
filterYear = 2014;
svg.style("stroke", "black").style("fill", "lightgrey").style("stroke-width", "1px");
var dateFormat;
data.forEach(function(d) {
dateFormat = d3.time.format('%Y');
d.year = dateFormat.parse(d.year);
d.mhi = +d.mhi;
d.mgr = +d.mgr;
parsed_data.push({"id": d.id, "year": d.year, "mhi": d.mhi, "mgr": d.mgr, "liq": +d.liq, "high_end_groc": +d.high_end_groc, "low_end_groc": +d.low_end_groc, "groc": +d.groc});
});
biz.forEach(function(d) {
d.end_date = dateFormat.parse(d.end_date);
d.start_date = dateFormat.parse(d.start_date);
d.storetype = d.storetype;
d.lat = +d.lat;
d.lon = +d.lon;
parsed_biz.push({"end_date": d.end_date, "storetype": d.storetype, "lat": d.lat, "lon": d.lon, "start_date": d.start_date});
});
ndx = crossfilter(parsed_data);
yearDim = ndx.dimension(function(d) { return [d.id, +d.year.getFullYear()]; });
tractDim = ndx.dimension(function(d) { return d.id; });
vector = svg.append("g")
.attr("class", "vector")
.call(renderTiles);
d3.select(".loading").remove();
render(filterYear, filterMetric);
plot_biz(false);
}
function sort_group(group, order) {
return {
all: function() {
var g = group.all(), map = {};
g.forEach(function(kv) {
map[kv.key] = kv.value;
});
return order.map(function(k) {
return {key: k, value: map[k]};
});
}
};
};
function render(filterYear, filterMetric) {
// dc.filterAll();
var metric_grouped = metricYearChange_map();
var year_grouped = metricChange_timeline();
var hg_grouped = yearDim.group(function(d) { return d;}).reduceSum(
function(d) {
return d.high_end_groc;
});
var lg_grouped = yearDim.group(function(d) { return d;}).reduceSum(
function(d) {
return d.low_end_groc;
});
var order = parsed_data.map(function(values) {
return [values.id, +values.year.getFullYear()];
});
//
// var hg_grouped = sort_group(hg_grouped, order);
// var lg_grouped = sort_group(lg_grouped, order)
var specColors = color(filterMetric);
var specDomains = domains[filterMetric];
map.projection(projection)
.dimension(tractDim)
.group(metric_grouped)
.valueAccessor(function(d) {
console.log(filterMetric_mapping[filterMetric]);
if (filterMetric === "mhi") {
return d.value.mhi;
} else if (filterMetric === "pov") {
return d.value.pov;
} else { //default on mgr
return d.value.mgr;
}
})
.colorAccessor(function(d) {
return d;
})
.overlayGeoJson(sf_topojson, 'sf', function(d) { return d.id; })
.colors(d3.scale.threshold().domain(specDomains).range(specColors))
.title(function (d) {
return "Census Tract " + d.key + "\n" + filterMetric_mapping[filterMetric] + " : " + format(d.value ? d.value : 0);
})
.transitionDuration(500);
// timeline.width(670)
// .height(300)
// .margins({top: 10, right: 50, bottom: 30, left: 50})
// .dimension(yearDim)
// .group(year_grouped)
// .keyAccessor(function(d) { return +d.key[1]; })
// .valueAccessor(function(d) {
// return +d.value;
// })
// .seriesAccessor(function(d) { return d.key[0]; })
// .x(d3.scale.linear().domain([2000, 2014]))
// .renderHorizontalGridLines(true)
// .xAxisLabel("Year")
// .yAxisLabel(function(d) {
// if (filterMetric === "mhi") {
// return "Mean Household Income";
// } else { //default on mgr
// return "Median Gross Rent";
// }
// })
// .clipPadding(10)
// .elasticY(true)
// .brushOn(false)
// .xAxis().ticks(8).tickFormat(d3.format("d"));
//
// biz_timeline.width(470)
// .height(300)
// .margins({top: 10, right: 50, bottom: 30, left: 30})
// .dimension(yearDim)
// .group(hg_grouped)
// .keyAccessor(function(d) { return +d.key[1]; })
// .valueAccessor(function(d) {
// return +d.value;
// })
// .seriesAccessor(function(d) { return d.key[0]; })
// .x(d3.scale.linear().domain([2000, 2014]))
// .renderHorizontalGridLines(true)
// .xAxisLabel("Year")
// .yAxisLabel("Number of '$$'+ Grocery Stores")
// .clipPadding(10)
// .elasticY(true)
// .brushOn(false)
// .xAxis().ticks(8).tickFormat(d3.format("d"));
//
// biz2_timeline.width(470)
// .height(300)
// .margins({top: 10, right: 50, bottom: 30, left: 30})
// .dimension(yearDim)
// .group(lg_grouped)
// .keyAccessor(function(d) { return +d.key[1]; })
// .valueAccessor(function(d) {
// return +d.value;
// })
// .seriesAccessor(function(d) { return d.key[0]; })
// .x(d3.scale.linear().domain([2000, 2014]))
// .renderHorizontalGridLines(true)
// .xAxisLabel("Year")
// .yAxisLabel("Number of '$' Grocery Stores")
// .clipPadding(10)
// .elasticY(true)
// .brushOn(false)
// .xAxis().ticks(8).tickFormat(d3.format("d"));
dc.renderAll();
}
function renderTiles() {
svg.selectAll("g")
.data(tiler
.scale(projection.scale() * 2 * Math.PI)
.translate(projection([0, 0])))
.enter().append("g")
.each(function(d) {
var g = d3.select(this);
d3.json("https://vector.mapzen.com/osm/roads/" + d[2] + "/" + d[0] + "/" + d[1] + ".json?api_key=vector-tiles-LM25tq4", function(error, json) {
if (error) throw error;
g.selectAll("path")
.data(json.features.sort(function(a, b) { return a.properties.sort_key - b.properties.sort_key; }))
.enter().append("path")
.attr("class", function(d) { return d.properties.kind; })
.attr("d", path);
});
});
}
function metricChange_timeline() {
var year_grouped = yearDim.group(function(d) { return d;}).reduceSum(
function(d) {
if (filterMetric === "mhi") {
return +d.mhi;
} else if (filterMetric === "pov") {
return +d.pov; | } else { //default on mgr | random_line_split |
|
step4_fit_prior_fast.py | (rates)
mask = self.tObsMask[tidx]
# if train:
# # Randomly sample a subset of drugs and doses
# mask = mask * autograd.Variable(torch.FloatTensor(np.random.random(size=self.tObsMask[tidx].shape) <= 0.5), requires_grad=False)
# Get log probabilities of the data and filter out the missing observations
loss = -(logsumexp(likelihoods.log_prob(self.tY[tidx][:,:,:,None])
+ self.tLogLamWeights[tidx][:,:,None], dim=-1)
* mask)
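# (Added note, not from the original code: the logsumexp over the last axis marginalizes
# over the lambda grid, computing log sum_k w_k * p(y | lam_k), i.e. the log of a finite
# mixture; multiplying by mask then zeroes out the missing dose observations.)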
loss = loss.sum(dim=-1) / mask.sum(dim=-1).clamp(1,mask.shape[-1]) # Average all doses in each curve
loss = loss.sum(dim=-1) / mask.max(dim=-1).values.sum(dim=-1).clamp(1,mask.shape[-2]) # Average across all drugs in each cell line
# print(loss)
# loss = -(likelihoods.log_prob(self.tY[tidx][:,:,:,None]) + self.tLogLamWeights[tidx][:,:,None]).exp().sum(dim=-1).clamp(1e-10, 1).sum()
# loss = -logsumexp(likelihoods.log_prob(self.tY[tidx][:,:,:,None]) + self.tLogLamWeights[tidx][:,:,None], dim=-1).sum()
return loss
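# (Added note, not from the original code: everything below this first return is
# unreachable leftover debugging scaffolding and never executes.)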
loss = -(likelihoods.log_prob(self.tY[tidx][:,:,:,None]) + self.tLogLamWeights[tidx][:,:,None]).exp().sum(dim=-1).clamp(1e-10, 1).log()
print('1', loss)
print('1 nans: {}'.format(np.isnan(loss.data.numpy()).sum()))
print('tObsMask', self.tObsMask[tidx])
print('tObsMask nans: {}'.format(np.isnan(self.tObsMask[tidx].data.numpy()).sum()))
loss = (loss * self.tObsMask[tidx]).sum(dim=-1) / self.tObsMask[tidx].sum(dim=-1).clamp(1, np.prod(self.tObsMask[tidx].shape)) # average across available doses
print('2', loss) | print('3', loss)
print('3 nans: {}'.format(np.isnan(loss.data.numpy()).sum()))
return loss
def fit_mu(self, verbose=False, **kwargs):
self.models = []
self.train_folds = []
self.validation_folds = []
for fold_idx, fold in enumerate(self.folds):
if verbose:
print('Fitting model {}'.format(fold_idx), flush=True)
# Define the training set for this fold
mask = np.ones(self.X.shape[0], dtype=bool)
mask[fold] = False
# Setup some torch variables
self.tLamGrid = autograd.Variable(torch.FloatTensor(self.lam_grid[mask]), requires_grad=False)
self.tLamWeights = autograd.Variable(torch.FloatTensor(self.lam_weights[mask]), requires_grad=False)
self.tLogLamWeights = autograd.Variable(torch.FloatTensor(self.log_lam_weights[mask]), requires_grad=False)
self.tC = autograd.Variable(torch.FloatTensor(self.C[mask]), requires_grad=False)
self.tObsMask = autograd.Variable(torch.FloatTensor(self.obs_mask[mask]), requires_grad=False)
self.tY = autograd.Variable(torch.FloatTensor(self.Y[mask]), requires_grad=False)
# Fit the model to the data, holding out a subset of cell lines entirely
results = fit_nn(self.X[mask],
lambda: NeuralModel(self.nfeatures, self.ndrugs, self.ndoses, **kwargs),
self.loss_fn, verbose=verbose, **kwargs)
model = results['model']
# Save the model to file
torch.save(model, os.path.join(self.model_path, 'model{}.pt'.format(fold_idx)))
self.models.append(model)
# Save the train, validation, and test folds
self.train_folds.append(np.arange(self.X.shape[0])[mask][results['train']])
self.validation_folds.append(np.arange(self.X.shape[0])[mask][results['validation']])
np.save(os.path.join(self.model_path, 'train_fold{}'.format(fold_idx)), self.train_folds[fold_idx])
np.save(os.path.join(self.model_path, 'validation_fold{}'.format(fold_idx)), self.validation_folds[fold_idx])
np.save(os.path.join(self.model_path, 'test_fold{}'.format(fold_idx)), self.folds[fold_idx])
# Get the out-of-sample predictions
self.mu[fold] = self.predict_mu(self.X[fold], model_idx=fold_idx)
# Save the out-of-sample predictions to file
np.save(os.path.join(self.model_path, 'mu'), self.mu)
def predict_mu(self, X, model_idx=None):
if model_idx is None:
return np.mean([self.predict_mu(X, model_idx=idx) for idx in range(len(self.models))], axis=0)
return self.models[model_idx].predict(X)
def _predict_mu_insample(self, fold_idx, sample_idx, drug_idx):
fold = self.folds[fold_idx]
mask = np.ones(self.X.shape[0], dtype=bool)
mask[fold] = False
tau_hat = self.models[fold_idx].predict(self.X[mask][sample_idx:sample_idx+1])[0,drug_idx]
tau_empirical = (self.Y[mask][sample_idx,drug_idx] - self.C[mask][sample_idx,drug_idx]) / self.lam_grid[mask][sample_idx,drug_idx][...,self.lam_grid.shape[-1]//2]
import matplotlib.pyplot as plt
import seaborn as sns
plt.scatter(np.arange(self.Y.shape[2])[::-1], tau_empirical, color='gray', label='Observed')
plt.plot(np.arange(self.Y.shape[2])[::-1], tau_hat, color='blue', label='Predicted')
plt.savefig('plots/mu-insample-fold{}-sample{}-drug{}.pdf'.format(fold_idx, sample_idx, drug_idx), bbox_inches='tight')
plt.close()
def _predict_mu_outsample(self, fold_idx, sample_idx, drug_idx):
fold = self.folds[fold_idx]
tau_hat = self.models[fold_idx].predict(self.X[fold][sample_idx:sample_idx+1])[0,drug_idx]
tau_empirical = (self.Y[fold][sample_idx,drug_idx] - self.C[fold][sample_idx,drug_idx]) / self.lam_grid[fold][sample_idx,drug_idx][...,self.lam_grid.shape[-1]//2]
import matplotlib.pyplot as plt
import seaborn as sns
plt.scatter(np.arange(self.Y.shape[2])[::-1], tau_empirical, color='gray', label='Observed')
plt.plot(np.arange(self.Y.shape[2])[::-1], tau_hat, color='blue', label='Predicted')
plt.savefig('plots/mu-outsample-fold{}-sample{}-drug{}.pdf'.format(fold_idx, sample_idx, drug_idx), bbox_inches='tight')
plt.close()
def load(self):
import warnings
self.models = []
self.train_folds = []
self.validation_folds = []
for fold_idx, fold in enumerate(self.folds):
fold_model_path = os.path.join(self.model_path, 'model{}.pt'.format(fold_idx))
if os.path.exists(fold_model_path):
self.models.append(torch.load(fold_model_path))
self.train_folds.append(np.load(os.path.join(self.model_path, 'train_fold{}.npy'.format(fold_idx))))
self.validation_folds.append(np.load(os.path.join(self.model_path, 'validation_fold{}.npy'.format(fold_idx))))
else:
warnings.warn('Missing model for fold {}'.format(fold_idx))
self.models.append(None)
self.train_folds.append(None)
self.validation_folds.append(None)
self.folds[fold_idx] = try_load(os.path.join(self.model_path, 'test_fold{}.npy'.format(fold_idx)),
np.load,
fail=lambda _: self.folds[fold_idx])
mu_path = os.path.join(self.model_path, 'mu.npy')
if os.path.exists(mu_path):
self.mu = np.load(mu_path)
else:
warnings.warn('Missing out-of-sample mu values')
'''Use a simple neural regression model'''
class NeuralModel(nn.Module):
def __init__(self, nfeatures, ndrugs, ndoses, layers=None, dropout=True, batchnorm=True, **kwargs):
super(NeuralModel, self).__init__()
self.nfeatures = nfeatures
self.ndrugs = ndrugs
self.ndoses = ndoses
self.nout = ndrugs*ndoses
# Setup the NN layers
all_layers = []
prev_out = nfeatures
if layers is not None:
for layer_size in layers:
if dropout:
all_layers.append(nn.Dropout())
if batchnorm:
all_layers.append(nn.BatchNorm1d(prev_out))
all_layers.append(nn.Linear(prev_out, layer_size))
all_layers.append(nn.ReLU())
prev_out = layer_size
# if dropout:
# all_layers.append(nn.Dropout())
| print('2 nans: {}'.format(np.isnan(loss.data.numpy()).sum()))
print()
print('shape', loss.shape)
loss = loss.sum(dim=1) / self.tObsMask[tidx].max(dim=-1).values.sum(dim=-1) | random_line_split |
step4_fit_prior_fast.py | 0.5), requires_grad=False)
# Get log probabilities of the data and filter out the missing observations
loss = -(logsumexp(likelihoods.log_prob(self.tY[tidx][:,:,:,None])
+ self.tLogLamWeights[tidx][:,:,None], dim=-1)
* mask)
loss = loss.sum(dim=-1) / mask.sum(dim=-1).clamp(1,mask.shape[-1]) # Average all doses in each curve
loss = loss.sum(dim=-1) / mask.max(dim=-1).values.sum(dim=-1).clamp(1,mask.shape[-2]) # Average across all drugs in each cell line
# print(loss)
# loss = -(likelihoods.log_prob(self.tY[tidx][:,:,:,None]) + self.tLogLamWeights[tidx][:,:,None]).exp().sum(dim=-1).clamp(1e-10, 1).sum()
# loss = -logsumexp(likelihoods.log_prob(self.tY[tidx][:,:,:,None]) + self.tLogLamWeights[tidx][:,:,None], dim=-1).sum()
return loss
loss = -(likelihoods.log_prob(self.tY[tidx][:,:,:,None]) + self.tLogLamWeights[tidx][:,:,None]).exp().sum(dim=-1).clamp(1e-10, 1).log()
print('1', loss)
print('1 nans: {}'.format(np.isnan(loss.data.numpy()).sum()))
print('tObsMask', self.tObsMask[tidx])
print('tObsMask nans: {}'.format(np.isnan(self.tObsMask[tidx].data.numpy()).sum()))
loss = (loss * self.tObsMask[tidx]).sum(dim=-1) / self.tObsMask[tidx].sum(dim=-1).clamp(1, np.prod(self.tObsMask[tidx].shape)) # average across available doses
print('2', loss)
print('2 nans: {}'.format(np.isnan(loss.data.numpy()).sum()))
print()
print('shape', loss.shape)
loss = loss.sum(dim=1) / self.tObsMask[tidx].max(dim=-1).values.sum(dim=-1)
print('3', loss)
print('3 nans: {}'.format(np.isnan(loss.data.numpy()).sum()))
return loss
def fit_mu(self, verbose=False, **kwargs):
self.models = []
self.train_folds = []
self.validation_folds = []
for fold_idx, fold in enumerate(self.folds):
if verbose:
print('Fitting model {}'.format(fold_idx), flush=True)
# Define the training set for this fold
mask = np.ones(self.X.shape[0], dtype=bool)
mask[fold] = False
# Setup some torch variables
self.tLamGrid = autograd.Variable(torch.FloatTensor(self.lam_grid[mask]), requires_grad=False)
self.tLamWeights = autograd.Variable(torch.FloatTensor(self.lam_weights[mask]), requires_grad=False)
self.tLogLamWeights = autograd.Variable(torch.FloatTensor(self.log_lam_weights[mask]), requires_grad=False)
self.tC = autograd.Variable(torch.FloatTensor(self.C[mask]), requires_grad=False)
self.tObsMask = autograd.Variable(torch.FloatTensor(self.obs_mask[mask]), requires_grad=False)
self.tY = autograd.Variable(torch.FloatTensor(self.Y[mask]), requires_grad=False)
# Fit the model to the data, holding out a subset of cell lines entirely
results = fit_nn(self.X[mask],
lambda: NeuralModel(self.nfeatures, self.ndrugs, self.ndoses, **kwargs),
self.loss_fn, verbose=verbose, **kwargs)
model = results['model']
# Save the model to file
torch.save(model, os.path.join(self.model_path, 'model{}.pt'.format(fold_idx)))
self.models.append(model)
# Save the train, validation, and test folds
self.train_folds.append(np.arange(self.X.shape[0])[mask][results['train']])
self.validation_folds.append(np.arange(self.X.shape[0])[mask][results['validation']])
np.save(os.path.join(self.model_path, 'train_fold{}'.format(fold_idx)), self.train_folds[fold_idx])
np.save(os.path.join(self.model_path, 'validation_fold{}'.format(fold_idx)), self.validation_folds[fold_idx])
np.save(os.path.join(self.model_path, 'test_fold{}'.format(fold_idx)), self.folds[fold_idx])
# Get the out-of-sample predictions
self.mu[fold] = self.predict_mu(self.X[fold], model_idx=fold_idx)
# Save the out-of-sample predictions to file
np.save(os.path.join(self.model_path, 'mu'), self.mu)
def predict_mu(self, X, model_idx=None):
if model_idx is None:
return np.mean([self.predict_mu(X, model_idx=idx) for idx in range(len(self.models))], axis=0)
return self.models[model_idx].predict(X)
def _predict_mu_insample(self, fold_idx, sample_idx, drug_idx):
fold = self.folds[fold_idx]
mask = np.ones(self.X.shape[0], dtype=bool)
mask[fold] = False
tau_hat = self.models[fold_idx].predict(self.X[mask][sample_idx:sample_idx+1])[0,drug_idx]
tau_empirical = (self.Y[mask][sample_idx,drug_idx] - self.C[mask][sample_idx,drug_idx]) / self.lam_grid[mask][sample_idx,drug_idx][...,self.lam_grid.shape[-1]//2]
import matplotlib.pyplot as plt
import seaborn as sns
plt.scatter(np.arange(self.Y.shape[2])[::-1], tau_empirical, color='gray', label='Observed')
plt.plot(np.arange(self.Y.shape[2])[::-1], tau_hat, color='blue', label='Predicted')
plt.savefig('plots/mu-insample-fold{}-sample{}-drug{}.pdf'.format(fold_idx, sample_idx, drug_idx), bbox_inches='tight')
plt.close()
def _predict_mu_outsample(self, fold_idx, sample_idx, drug_idx):
fold = self.folds[fold_idx]
tau_hat = self.models[fold_idx].predict(self.X[fold][sample_idx:sample_idx+1])[0,drug_idx]
tau_empirical = (self.Y[fold][sample_idx,drug_idx] - self.C[fold][sample_idx,drug_idx]) / self.lam_grid[fold][sample_idx,drug_idx][...,self.lam_grid.shape[-1]//2]
import matplotlib.pyplot as plt
import seaborn as sns
plt.scatter(np.arange(self.Y.shape[2])[::-1], tau_empirical, color='gray', label='Observed')
plt.plot(np.arange(self.Y.shape[2])[::-1], tau_hat, color='blue', label='Predicted')
plt.savefig('plots/mu-outsample-fold{}-sample{}-drug{}.pdf'.format(fold_idx, sample_idx, drug_idx), bbox_inches='tight')
plt.close()
def load(self):
import warnings
self.models = []
self.train_folds = []
self.validation_folds = []
for fold_idx, fold in enumerate(self.folds):
fold_model_path = os.path.join(self.model_path, 'model{}.pt'.format(fold_idx))
if os.path.exists(fold_model_path):
self.models.append(torch.load(fold_model_path))
self.train_folds.append(np.load(os.path.join(self.model_path, 'train_fold{}.npy'.format(fold_idx))))
self.validation_folds.append(np.load(os.path.join(self.model_path, 'validation_fold{}.npy'.format(fold_idx))))
else:
warnings.warn('Missing model for fold {}'.format(fold_idx))
self.models.append(None)
self.train_folds.append(None)
self.validation_folds.append(None)
self.folds[fold_idx] = try_load(os.path.join(self.model_path, 'test_fold{}.npy'.format(fold_idx)),
np.load,
fail=lambda _: self.folds[fold_idx])
mu_path = os.path.join(self.model_path, 'mu.npy')
if os.path.exists(mu_path):
self.mu = np.load(mu_path)
else:
warnings.warn('Missing out-of-sample mu values')
'''Use a simple neural regression model'''
class NeuralModel(nn.Module):
def __init__(self, nfeatures, ndrugs, ndoses, layers=None, dropout=True, batchnorm=True, **kwargs):
super(NeuralModel, self).__init__()
self.nfeatures = nfeatures
self.ndrugs = ndrugs
self.ndoses = ndoses
self.nout = ndrugs*ndoses
# Setup the NN layers
all_layers = []
prev_out = nfeatures
if layers is not None:
for layer_size in layers:
if dropout:
all_layers.append(nn.Dropout())
if batchnorm:
all_layers.append(nn.BatchNorm1d(prev_out))
all_layers.append(nn.Linear(prev_out, layer_size))
all_layers.append(nn.ReLU())
prev_out = layer_size
# if dropout:
# all_layers.append(nn.Dropout())
# if batchnorm:
# all_layers.append(nn.BatchNorm1d(prev_out))
all_layers.append(nn.Linear(prev_out, self.nout))
self.fc_in = nn.Sequential(*all_layers)
self.softplus = nn.Softplus()
def | forward | identifier_name |
|
step4_fit_prior_fast.py | # noise = autograd.Variable(torch.FloatTensor(Z), requires_grad=False)
noise = 0 # TEMP
# Get the MVN draw as mu + epsilon
beta = mu + noise
else:
beta = mu
# Logistic transform on the log-odds prior sample
tau = 1 / (1. + (-beta).exp())
# tau = 1 / (1 + nn.Softplus()(beta))
return tau
def create_predictive_model(model_save_path, genomic_features, drug_responses, drug_details,
feature_types=['MUT', 'CNV', 'EXP', 'TISSUE'], no_fix=False,
**kwargs):
print('Loading genomic features')
X = load_dataset(genomic_features, index_col=0)
# Remove any features not specified (this is useful for ablation studies)
for ftype in ['MUT', 'CNV', 'EXP', 'TISSUE']:
if ftype not in feature_types:
select = [c for c in X.columns if not c.startswith(ftype)]
print('Removing {} {} features'.format(X.shape[1] - len(select), ftype))
X = X[select]
feature_names = X.columns
print('Loading response data')
df = load_dataset(drug_responses) # usually data/raw_step3.csv
# Get the observations
treatment_cols = ['raw_max'] + ['raw{}'.format(i) for i in range(2,10)]
Y_raw = df[treatment_cols].values
a_raw = df['Pos_MLE_Shape'].values
b_raw = df['Pos_MLE_Scale'].values
c_raw = df['Neg_MAP_Estimate'].values
# Handle some idiosyncracies of the GDSC dataset
if no_fix:
import warnings
warnings.warn('Fix dosages is not enabled. GDSC data requires fixing; this should only be specified on another dataset.')
else:
select = np.any(np.isnan(Y_raw), axis=1)
Y_raw[select,0::2] = Y_raw[select,:5]
Y_raw[select,1::2] = np.nan
# Transform the dataset into a multi-task regression one
print('Building multi-task response')
raw_index = np.full((Y_raw.shape[0], 2), -1, dtype=int)
cell_ids = {c: i for i,c in enumerate(X.index)}
drug_ids = {d: i for i,d in enumerate(df['DRUG_ID'].unique())}
# cosmic_ids = {row['CELL_LINE_NAME']: row['COSMIC_ID'] for idx, row in df[['CELL_LINE_NAME', 'COSMIC_ID']].drop_duplicates().iterrows()}
Y = np.full((len(cell_ids), len(drug_ids), Y_raw.shape[1]), np.nan)
A = np.full(Y.shape[:2], np.nan)
B = np.full(Y.shape[:2], np.nan)
C = np.full(Y.shape[:2], np.nan)
missing = set()
missing_cosmic = set()
for idx, row in df.iterrows():
cell_id = row['CELL_LINE_NAME']
drug_id = row['DRUG_ID']
# Skip cell lines that have no features
if cell_id not in cell_ids:
missing.add(cell_id)
continue
i, j = cell_ids[cell_id], drug_ids[drug_id]
Y[i,j] = Y_raw[idx]
A[i,j] = a_raw[idx]
B[i,j] = b_raw[idx]
C[i,j] = c_raw[idx]
raw_index[idx] = i, j
# print('Y shape: {} Missing responses: {}'.format(Y.shape, np.isnan(Y).sum()))
# print('Missing any response: {}'.format(np.all(np.all(np.isnan(Y), axis=-1), axis=-1).sum()))
# print('Mismatched cell lines:')
# print(sorted(missing))
# Remove the cell lines missing any responses
all_missing = np.all(np.all(np.isnan(Y), axis=-1), axis=-1)
print('Removing cell lines with no response data:')
print(sorted(X.index[all_missing]))
X = X.iloc[~all_missing]
Y = Y[~all_missing]
A = A[~all_missing]
B = B[~all_missing]
C = C[~all_missing]
raw_index[:,0] -= (raw_index[:,0:1] >= (np.arange(len(all_missing))[all_missing])[None]).sum(axis=1)
print('Loading drug names')
drug_idx = [None for _ in range(len(drug_ids))]
for d,i in drug_ids.items():
drug_idx[i] = d
drug_names = pd.read_csv(drug_details, index_col=0, header=0).loc[drug_idx]['Drug Name'].values
print('Building optimizer')
ebo = EmpiricalBayesOptimizer()
ebo.setup(model_save_path, X.index, drug_names, list(drug_ids.keys()), feature_names,
X.values, Y, A, B, C, raw_index, **kwargs)
return ebo
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as plt
import argparse
parser = argparse.ArgumentParser(description='Deep empirical Bayes dose-response model fitting.')
# Experiment settings
parser.add_argument('name', default='gdsc', help='The project name. Will be prepended to plots and saved files.')
parser.add_argument('--drug_responses', default='data/raw_step3.csv', help='The dataset file with all of the experiments.')
parser.add_argument('--genomic_features', default='data/gdsc_all_features.csv', help='The file with the cell line features.')
parser.add_argument('--drug_details', default='data/gdsc_drug_details.csv', help='The data file with all of the drug information (names, targets, etc).')
parser.add_argument('--plot_path', default='plots', help='The path where plots will be saved.')
parser.add_argument('--save_path', default='data', help='The path where data and models will be saved.')
parser.add_argument('--seed', type=int, default=42, help='The pseudo-random number generator seed.')
parser.add_argument('--torch_threads', type=int, default=1, help='The number of threads that pytorch can use in a fold.')
parser.add_argument('--no_fix', action='store_true', default=False, help='Do not correct the dosages.')
parser.add_argument('--nepochs', type=int, default=50, help='The number of training epochs per fold.')
parser.add_argument('--nfolds', type=int, default=10, help='The number of cross validation folds.')
parser.add_argument('--batch_size', type=int, default=10, help='The mini-batch size.')
parser.add_argument('--lr', type=float, default=3e-4, help='The SGD learning rate.')
parser.add_argument('--optimizer', choices=['SGD', 'Adam', 'RMSprop'], default='RMSprop', help='The type of SGD method.')
parser.add_argument('--weight_decay', type=float, default=1e-4, help='The weight decay for SGD.')
parser.add_argument('--step_decay', type=float, default=0.998, help='The exponential decay for the learning rate per epoch.')
parser.add_argument('--momentum', type=float, default=0.9, help='SGD momentum, if applicable.')
parser.add_argument('--layers', type=int, nargs='*', default=[1000,200,200], help='The hidden layer dimensions of the NN.')
parser.add_argument('--feature_types', choices=['MUT', 'CNV', 'EXP', 'TISSUE'], nargs='*', default=['MUT', 'CNV', 'EXP', 'TISSUE'], help='The type of genomic features to use. By default we use the full feature set.')
parser.add_argument('--fold', type=int, help='If specified, trains only on a specific cross validation fold. This is useful for parallel/distributed training.')
parser.add_argument('--checkpoint', action='store_true', help='If specified, saves progress after every epoch of training.')
parser.add_argument('--verbose', action='store_true', help='If specified, prints progress to terminal.')
# Get the arguments from the command line
args = parser.parse_args()
dargs = vars(args)
# Seed the random number generators so we get reproducible results
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.set_num_threads(args.torch_threads)
print('Running step 4 with args:')
print(args)
print('Using feature set {}'.format(args.feature_types))
print('Working on project: {}'.format(args.name))
# Create the model directory
model_save_path = os.path.join(args.save_path, args.name)
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
# Load the predictor
ebo = create_predictive_model(model_save_path, **dargs)
print('Fitting model')
ebo.fit_mu(**dargs)
# print('Loading model from file')
# ebo.load()
print('Plotting out of sample examples')
for i in range(5):
| for j in range(5):
ebo._predict_mu_insample(0,i,j) | conditional_block |
|
step4_fit_prior_fast.py | (dim=-1)
print('3', loss)
print('3 nans: {}'.format(np.isnan(loss.data.numpy()).sum()))
return loss
def fit_mu(self, verbose=False, **kwargs):
self.models = []
self.train_folds = []
self.validation_folds = []
for fold_idx, fold in enumerate(self.folds):
if verbose:
print('Fitting model {}'.format(fold_idx), flush=True)
# Define the training set for this fold
mask = np.ones(self.X.shape[0], dtype=bool)
mask[fold] = False
# Setup some torch variables
self.tLamGrid = autograd.Variable(torch.FloatTensor(self.lam_grid[mask]), requires_grad=False)
self.tLamWeights = autograd.Variable(torch.FloatTensor(self.lam_weights[mask]), requires_grad=False)
self.tLogLamWeights = autograd.Variable(torch.FloatTensor(self.log_lam_weights[mask]), requires_grad=False)
self.tC = autograd.Variable(torch.FloatTensor(self.C[mask]), requires_grad=False)
self.tObsMask = autograd.Variable(torch.FloatTensor(self.obs_mask[mask]), requires_grad=False)
self.tY = autograd.Variable(torch.FloatTensor(self.Y[mask]), requires_grad=False)
# Fit the model to the data, holding out a subset of cell lines entirely
results = fit_nn(self.X[mask],
lambda: NeuralModel(self.nfeatures, self.ndrugs, self.ndoses, **kwargs),
self.loss_fn, verbose=verbose, **kwargs)
model = results['model']
# Save the model to file
torch.save(model, os.path.join(self.model_path, 'model{}.pt'.format(fold_idx)))
self.models.append(model)
# Save the train, validation, and test folds
self.train_folds.append(np.arange(self.X.shape[0])[mask][results['train']])
self.validation_folds.append(np.arange(self.X.shape[0])[mask][results['validation']])
np.save(os.path.join(self.model_path, 'train_fold{}'.format(fold_idx)), self.train_folds[fold_idx])
np.save(os.path.join(self.model_path, 'validation_fold{}'.format(fold_idx)), self.validation_folds[fold_idx])
np.save(os.path.join(self.model_path, 'test_fold{}'.format(fold_idx)), self.folds[fold_idx])
# Get the out-of-sample predictions
self.mu[fold] = self.predict_mu(self.X[fold], model_idx=fold_idx)
# Save the out-of-sample predictions to file
np.save(os.path.join(self.model_path, 'mu'), self.mu)
def predict_mu(self, X, model_idx=None):
if model_idx is None:
return np.mean([self.predict_mu(X, model_idx=idx) for idx in range(len(self.models))], axis=0)
return self.models[model_idx].predict(X)
def _predict_mu_insample(self, fold_idx, sample_idx, drug_idx):
fold = self.folds[fold_idx]
mask = np.ones(self.X.shape[0], dtype=bool)
mask[fold] = False
tau_hat = self.models[fold_idx].predict(self.X[mask][sample_idx:sample_idx+1])[0,drug_idx]
tau_empirical = (self.Y[mask][sample_idx,drug_idx] - self.C[mask][sample_idx,drug_idx]) / self.lam_grid[mask][sample_idx,drug_idx][...,self.lam_grid.shape[-1]//2]
import matplotlib.pyplot as plt
import seaborn as sns
plt.scatter(np.arange(self.Y.shape[2])[::-1], tau_empirical, color='gray', label='Observed')
plt.plot(np.arange(self.Y.shape[2])[::-1], tau_hat, color='blue', label='Predicted')
plt.savefig('plots/mu-insample-fold{}-sample{}-drug{}.pdf'.format(fold_idx, sample_idx, drug_idx), bbox_inches='tight')
plt.close()
def _predict_mu_outsample(self, fold_idx, sample_idx, drug_idx):
fold = self.folds[fold_idx]
tau_hat = self.models[fold_idx].predict(self.X[fold][sample_idx:sample_idx+1])[0,drug_idx]
tau_empirical = (self.Y[fold][sample_idx,drug_idx] - self.C[fold][sample_idx,drug_idx]) / self.lam_grid[fold][sample_idx,drug_idx][...,self.lam_grid.shape[-1]//2]
import matplotlib.pyplot as plt
import seaborn as sns
plt.scatter(np.arange(self.Y.shape[2])[::-1], tau_empirical, color='gray', label='Observed')
plt.plot(np.arange(self.Y.shape[2])[::-1], tau_hat, color='blue', label='Predicted')
plt.savefig('plots/mu-outsample-fold{}-sample{}-drug{}.pdf'.format(fold_idx, sample_idx, drug_idx), bbox_inches='tight')
plt.close()
def load(self):
import warnings
self.models = []
self.train_folds = []
self.validation_folds = []
for fold_idx, fold in enumerate(self.folds):
fold_model_path = os.path.join(self.model_path, 'model{}.pt'.format(fold_idx))
if os.path.exists(fold_model_path):
self.models.append(torch.load(fold_model_path))
self.train_folds.append(np.load(os.path.join(self.model_path, 'train_fold{}.npy'.format(fold_idx))))
self.validation_folds.append(np.load(os.path.join(self.model_path, 'validation_fold{}.npy'.format(fold_idx))))
else:
warnings.warn('Missing model for fold {}'.format(fold_idx))
self.models.append(None)
self.train_folds.append(None)
self.validation_folds.append(None)
self.folds[fold_idx] = try_load(os.path.join(self.model_path, 'test_fold{}.npy'.format(fold_idx)),
np.load,
fail=lambda _: self.folds[fold_idx])
mu_path = os.path.join(self.model_path, 'mu.npy')
if os.path.exists(mu_path):
self.mu = np.load(mu_path)
else:
warnings.warn('Missing out-of-sample mu values')
'''Use a simple neural regression model'''
class NeuralModel(nn.Module):
def __init__(self, nfeatures, ndrugs, ndoses, layers=None, dropout=True, batchnorm=True, **kwargs):
super(NeuralModel, self).__init__()
self.nfeatures = nfeatures
self.ndrugs = ndrugs
self.ndoses = ndoses
self.nout = ndrugs*ndoses
# Setup the NN layers
all_layers = []
prev_out = nfeatures
if layers is not None:
for layer_size in layers:
if dropout:
all_layers.append(nn.Dropout())
if batchnorm:
all_layers.append(nn.BatchNorm1d(prev_out))
all_layers.append(nn.Linear(prev_out, layer_size))
all_layers.append(nn.ReLU())
prev_out = layer_size
# if dropout:
# all_layers.append(nn.Dropout())
# if batchnorm:
# all_layers.append(nn.BatchNorm1d(prev_out))
all_layers.append(nn.Linear(prev_out, self.nout))
self.fc_in = nn.Sequential(*all_layers)
self.softplus = nn.Softplus()
def forward(self, X):
fwd = self.fc_in(X).reshape(-1, self.ndrugs, self.ndoses)
# Enforce monotonicity
mu = torch.cat([fwd[:,:,0:1], fwd[:,:,0:1] + self.softplus(fwd[:,:,1:]).cumsum(dim=2)], dim=2)
# Do we want gradients or just predictions?
if self.training:
# Reparameterization trick for beta with diagonal covariance
# Z = np.random.normal(0,1,size=mu.shape)
# noise = autograd.Variable(torch.FloatTensor(Z), requires_grad=False)
noise = 0 # TEMP
# Get the MVN draw as mu + epsilon
beta = mu + noise
else:
beta = mu
# Logistic transform on the log-odds prior sample
tau = 1 / (1. + (-beta).exp())
# tau = 1 / (1 + nn.Softplus()(beta))
return tau
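# --- Illustrative sketch, not part of the original file ---
# forward() makes each dose-response curve monotone by keeping the first logit free and
# adding cumulative softplus increments before the logistic squash. A minimal standalone
# check of the same construction (assumes torch is available):
#
#   import torch, torch.nn as nn
#   fwd = torch.randn(2, 3, 5)                      # (batch, drugs, doses)
#   beta = torch.cat([fwd[:, :, :1],
#                     fwd[:, :, :1] + nn.Softplus()(fwd[:, :, 1:]).cumsum(dim=2)], dim=2)
#   tau = 1 / (1 + (-beta).exp())
#   assert bool((tau[:, :, 1:] >= tau[:, :, :-1]).all())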
def create_predictive_model(model_save_path, genomic_features, drug_responses, drug_details,
feature_types=['MUT', 'CNV', 'EXP', 'TISSUE'], no_fix=False,
**kwargs):
| print('Loading genomic features')
X = load_dataset(genomic_features, index_col=0)
# Remove any features not specified (this is useful for ablation studies)
for ftype in ['MUT', 'CNV', 'EXP', 'TISSUE']:
if ftype not in feature_types:
select = [c for c in X.columns if not c.startswith(ftype)]
print('Removing {} {} features'.format(X.shape[1] - len(select), ftype))
X = X[select]
feature_names = X.columns
print('Loading response data')
df = load_dataset(drug_responses) # usually data/raw_step3.csv
# Get the observations
treatment_cols = ['raw_max'] + ['raw{}'.format(i) for i in range(2,10)]
Y_raw = df[treatment_cols].values
a_raw = df['Pos_MLE_Shape'].values
b_raw = df['Pos_MLE_Scale'].values | identifier_body |
|
simulating_agent_model_2D.py | (Agent1=None, Agent2=None):
"""
ARGS:
RETURN:
DESCRIPTION:
Gives displacement between two agents.
DEBUG:
FUTURE:
"""
if(type(Agent1) == AGENT):
x1=Agent1.posL[0]
y1=Agent1.posL[1]
elif(type(Agent1) == list):
x1=Agent1[0]
y1=Agent1[1]
else:
exit_with_error("ERROR!!! {} not a supported type\n".format(type(Agent1)))
if(type(Agent2) == AGENT):
x2=Agent2.posL[0]
y2=Agent2.posL[1]
elif(type(Agent2) == list):
x2=Agent2[0]
y2=Agent2[1]
else:
exit_with_error("ERROR!!! {} not a supported type\n".format(type(Agent2)))
return np.sqrt( (x1-x2)**2 + (y1-y2)**2)
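# (Added usage note, not from the original code: displacement() accepts either AGENT
#  objects or bare [x, y] lists, e.g. displacement([0.0, 0.0], [0.3, 0.4]) returns 0.5.)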
def move_agent(Agent=None, AgentL=None, InfectDist=None, Quarantine=None, DeltaT=None):
"""
ARGS:
Agent : AGENT, The AGENT whose trajectory we are computing
AgentL : List of AGENTs, Used for avoiding quarantined AGENTs
InfectDist : Float, Infectiousness distance. Used as radius around
quarantined, infected AGENTs
Quarantine : Boolean, do we quarantine infected agents?
DeltaT : Time interval.
RETURN:
DESCRIPTION:
Moves agent. Applies implied boundary conditions [0,0] -> [1,1]
If quarantine option used, I do an approximation of reality.
Reasons :
1. Trying to avoid multiple infected individuals gets logically complicated
to code.
2. Avoiding the bounds [0,0] -> [1,1] and trying to avoid an infected
individual adds yet another level of logical complexity.
Solution :
1. I only avoid the first infected individual that I encounter and then I test
the bounds.
DEBUG:
FUTURE:
"""
xi= Agent.posL[0]
yi= Agent.posL[1]
vx= Agent.vL[0]
vy= Agent.vL[1]
v = np.sqrt(vx**2 + vy**2)
xf= Agent.vL[0] * DeltaT + Agent.posL[0]
yf= Agent.vL[1] * DeltaT + Agent.posL[1]
r = InfectDist # Radius about quarantined individual
# Check if quarantined agent nearby.
# 1. Only consider 1st quarantine encountered b/c it could be potentially very
# challenging to solve for preventing a susceptible from completely avoiding _all_
# quarantined agents.
# 2.
# 3.
# Quarantined agents can't move.
if(Agent.quarantine == True and Agent.immune == True):
return
if(Quarantine == True and Agent.infected == False):
# displacement, Agent final - initial
dfi = np.sqrt((xf-xi)**2 + (yf-yi)**2)
# Get line function, y = mx + b
m = (yf-yi)/(xf-xi) # Slope of line
b = yf - m*xf # Pick a point on the line, solve for intercept
for agent in AgentL:
# Must be quarantined to avoid
if(agent.quarantine == False):
continue
xc = agent.posL[0]
yc = agent.posL[1]
# displ, quarantined - Agent final
dfq = np.sqrt((xc-xf)**2 + (yc-yf)**2)
# displ, quarantined - Agent initial
diq = np.sqrt((xc-xi)**2 + (yc-yi)**2)
# There might be a collision -
# It is possible that both dfq and diq are
# outside of radius, yet the trajectory passes through it
#### Maybe easier if we just say if dfi < 3*r and dfq < 3*r
if(dfq <= r or diq <= r): # This is gross and imprecise.
# Get circle of exclusion line, recall
# 0 = (x - xc)^2 + (y - yc)^2 - r^2
# xc,yc = x,yposition of center of circle
def f(x):
y = m*x+b
return( (x-xc)**2 + (y-yc)**2 - r**2)
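# (Added illustration, not original code: f(x) = 0 exactly where the straight-line
#  trajectory y = m*x + b meets the exclusion circle of radius r centered at (xc, yc);
#  e.g. with xc = yc = 0, r = 1 and the horizontal line y = 0, the roots are x = -1 and x = 1.)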
### With many root solvers, it requires that f(a)*f(b) < 0. However,
### fsolve doesn't care. It just needs bounds to look
xroots = optimize.fsolve(f, [xc-r, xc+r])
# If there are two roots, which do i pick? Pick closest to Agent
if(len(xroots) == 2):
x1=xroots[0]
y1= m * x1 + b
d1=displacement(Agent,[x1,y1])
x2=xroots[1]
y2= m * x2 + b
d2=displacement(Agent,[x2,y2])
# Use 1st root b/c it is closer
if(d1<d2):
x=x1
y=y1
else:
x=x2
y=y2
elif(len(xroots) == 1):
x = xroots[0]
y = m * x + b
else:
exit_with_error("ERROR!!! I don't understand how there can "
"be more than 2 roots!\n")
rx = x-xc
ry = y-yc
rvect = [rx, ry]
# Find line perpendicular to rvect, i.e. tangent to the circle, call it 't'
# Let :
# t = a \hat(i) + b \hat(j)
# rvect = rx \hat(i) + ry \hat(j)
# Solve equation :
# t \dot rvect = 0
# (a \hat(i) + b \hat(j)) \dot (rx \hat(i) + ry \hat(j)) = 0
# a * rx + b * ry = 0
# a = -(b * ry) / rx
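# (Added worked example, not original code: for rvect = (1, 1) and b = 1 this gives
#  a = -1, so the tangent t = (-1, 1) satisfies t . rvect = -1 + 1 = 0, and
#  alpha = np.arctan(1 / -1) = -pi/4.)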
### Vertical line
if(rx == 0):
a = 0
b = 1
alpha = np.pi / 2 # 90deg, Angle between tangent and horizontal
### Horizontal line
elif(ry == 0):
a = 1
b = 0
alpha = 0 # 0deg, Angle between tangent and horizontal
### Everything else
else:
b = 1
a = -b * ry / rx
alpha = np.arctan(b/a) # Angle between tangent and horizontal
if(np.isnan(alpha)):
exit_with_error("ERROR!!! np.arctan({}/{}) == nan\n".format(b/a))
#if(np.isclose(np.sqrt(rvect[0]*rvect[0]+rvect[1]*rvect[1]), r) == False):
# exit_with_error("ERROR!!! I don't know how |rvect| != |r|\n")
# Now get angle between rvector and velocity vector
theta = np.arccos( (vx*rvect[0] + vy*rvect[1]) /
np.sqrt((vx**2 + vy**2)*(rvect[0]**2 + rvect[1]**2)))
# Angle of reflection w/r/t to the tangent line on circle
phi = theta - np.pi / 2.0
#phi = theta
vx = v * np.sin(phi) * np.cos(alpha)
vy = v * np.cos(phi) * np.sin(alpha)
#print("{:<.5f} {:<.5f} {:<.5f}".format(vx,vy,phi))
xf = vx * DeltaT + xi
yf = vy * DeltaT + yi
Agent.vL[0] = vx
Agent.vL[1] = vy
break
#else:
# continue
# Check bounds
if(xf < 0):
xf = -1.0 * xf
Agent.vL[0] = -1.0 * Agent.vL[0]
if(yf < 0):
yf = -1.0 * yf
Agent.vL[1] = -1.0 * Agent.vL[1]
if(xf > 1.0):
d = xf - 1.0
xf = xf - d
Agent.vL[0] = -1.0 * Agent.vL[0]
if(yf > 1.0):
d = yf - 1.0
yf | displacement | identifier_name |
|
simulating_agent_model_2D.py | do an approximation of reality.
Reasons :
1. Trying to avoid multiple infected individuals gets logically complicated
to code.
2. Avoiding the bounds [0,0] -> [1,1] and trying to avoid an infected
individual adds yet another level of logical complexity.
Solution :
1. I only avoid the first infected individual that I encounter and then I test
the bounds.
DEBUG:
FUTURE:
"""
xi= Agent.posL[0]
yi= Agent.posL[1]
vx= Agent.vL[0]
vy= Agent.vL[1]
v = np.sqrt(vx**2 + vy**2)
xf= Agent.vL[0] * DeltaT + Agent.posL[0]
yf= Agent.vL[1] * DeltaT + Agent.posL[1]
r = InfectDist # Radius about quarantined individual
# Check if quarantined agent nearby.
# 1. Only consider 1st quarantine encountered b/c it could be potentially very
# challenging to solve for preventing a susceptible from completely avoiding _all_
# quarantined agents.
# 2.
# 3.
# Quarantined agents can't move.
if(Agent.quarantine == True and Agent.immune == True):
return
if(Quarantine == True and Agent.infected == False):
# displacement, Agent final - initial
dfi = np.sqrt((xf-xi)**2 + (yf-yi)**2)
# Get line function, y = mx + b
m = (yf-yi)/(xf-xi) # Slope of line
b = yf - m*xf # Pick a point on the line, solve for intercept
for agent in AgentL:
# Must be quarantined to avoid
if(agent.quarantine == False):
continue
xc = agent.posL[0]
yc = agent.posL[1]
# displ, quarantined - Agent final
dfq = np.sqrt((xc-xf)**2 + (yc-yf)**2)
# displ, quarantined - Agent initial
diq = np.sqrt((xc-xi)**2 + (yc-yi)**2)
# There might be a collision -
# It is possible that both dfq and diq are
# outside of radius, yet the trajectory passes through it
#### Maybe easier if we just say if dfi < 3*r and dfq < 3*r
if(dfq <= r or diq <= r): # This is gross and imprecise.
# Get circle of exclusion line, recall
# 0 = (x - xc)^2 + (y - yc)^2 - r^2
# xc,yc = x,yposition of center of circle
def f(x):
|
### With many root solvers, it requires that f(a)*f(b) < 0. However,
### fsolve doesn't care. It just needs bounds to look
xroots = optimize.fsolve(f, [xc-r, xc+r])
# If there are two roots, which do i pick? Pick closest to Agent
if(len(xroots) == 2):
x1=xroots[0]
y1= m * x1 + b
d1=displacement(Agent,[x1,y1])
x2=xroots[1]
y2= m * x2 + b
d2=displacement(Agent,[x2,y2])
# Use 1st root b/c it is closer
if(d1<d2):
x=x1
y=y1
else:
x=x2
y=y2
elif(len(xroots) == 1):
x = xroots[0]
y = m * x + b
else:
exit_with_error("ERROR!!! I don't understand how there can "
"be more than 2 roots!\n")
rx = x-xc
ry = y-yc
rvect = [rx, ry]
# Find line perpendicular to rvect, i.e. tangent to the circle, call it 't'
# Let :
# t = a \hat(i) + b \hat(j)
# rvect = rx \hat(i) + ry \hat(j)
# Solve equation :
# t \dot rvect = 0
# (a \hat(i) + b \hat(j)) \dot (rx \hat(i) + ry \hat(j)) = 0
# a * rx + b * ry = 0
# a = -(b * ry) / rx
### Vertical line
if(rx == 0):
a = 0
b = 1
alpha = np.pi / 2 # 90deg, Angle between tangent and horizontal
### Horizontal line
elif(ry == 0):
a = 1
b = 0
alpha = 0 # 0deg, Angle between tangent and horizontal
### Everything else
else:
b = 1
a = -b * ry / rx
alpha = np.arctan(b/a) # Angle between tangent and horizontal
if(np.isnan(alpha)):
exit_with_error("ERROR!!! np.arctan({}/{}) == nan\n".format(b/a))
#if(np.isclose(np.sqrt(rvect[0]*rvect[0]+rvect[1]*rvect[1]), r) == False):
# exit_with_error("ERROR!!! I don't know how |rvect| != |r|\n")
# Now get angle between rvector and velocity vector
theta = np.arccos( (vx*rvect[0] + vy*rvect[1]) /
np.sqrt((vx**2 + vy**2)*(rvect[0]**2 + rvect[1]**2)))
# Angle of reflection w/r/t to the tangent line on circle
phi = theta - np.pi / 2.0
#phi = theta
vx = v * np.sin(phi) * np.cos(alpha)
vy = v * np.cos(phi) * np.sin(alpha)
#print("{:<.5f} {:<.5f} {:<.5f}".format(vx,vy,phi))
xf = vx * DeltaT + xi
yf = vy * DeltaT + yi
Agent.vL[0] = vx
Agent.vL[1] = vy
break
#else:
# continue
# Check bounds
if(xf < 0):
xf = -1.0 * xf
Agent.vL[0] = -1.0 * Agent.vL[0]
if(yf < 0):
yf = -1.0 * yf
Agent.vL[1] = -1.0 * Agent.vL[1]
if(xf > 1.0):
d = xf - 1.0
xf = xf - d
Agent.vL[0] = -1.0 * Agent.vL[0]
if(yf > 1.0):
d = yf - 1.0
yf= yf - d
Agent.vL[1] = -1.0 * Agent.vL[1]
# Adjust Position
Agent.posL[0] = xf
Agent.posL[1] = yf
# Adjust velocity
dvx = random.uniform(-1,1)/100.0 # Want crossing time to be about 25 steps
dvy = random.uniform(-1,1)/100.0
Agent.vL[0] += dvx
Agent.vL[1] += dvy
def main():
"""
ARGS:
RETURN:
1. Creates images. Turn into moving using ffmpeg, e.g.
ffmpeg -framerate 4 -pattern_type glob -i 'output/*.png' -c:v libx264 out.mp4
DESCRIPTION:
DEBUG:
FUTURE:
1. Add option to fit only a specific section of data.
2. Make main loop NOT O(N^2). Maybe organize by position on a grid.
"""
# Check Python version
nArg = len(sys.argv)
# Use python 3
if(sys.version_info[0] != 3):
exit_with_error("ERROR!!! Use Python 3\n")
# Get options
if(len(sys.argv) > 1 and "-h" in sys.argv[1]):
print_help(0)
elif(nArg != 1 and nArg != 2):
print_help(1)
elif(nArg == 1):
quarantine = False
elif(nArg == 2 and sys.argv[1] == "quarantine"):
quarantine = True
startTime = time.time()
print("{} \n". | y = m*x+b
return( (x-xc)**2 + (y-yc)**2 - r**2) | identifier_body |
simulating_agent_model_2D.py | #
#
#
import sys
import numpy as np
import time
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.patches import Circle
from error import exit_with_error
from scipy import optimize
import random
random.seed(42) # Change later
from classes import AGENT
def print_help(ExitCode):
"""
ARGS:
RETURN:
DESCRIPTION:
DEBUG:
FUTURE:
"""
sys.stderr.write(
"python3 ./src/simulating_agent_model_2D.py [quarantine]\n"
" [quarantine] : optional, puts infected in quarantine symptoms present\n"
" \n"
" \n"
" After running, create a movie via : \n"
" ffmpeg -framerate 4 -pattern_type glob -i 'tmp/*.png' -c:v libx264 out.mp4\n"
" \n"
" To Run: \n"
" source ~/.local/virtualenvs/python3.7/bin/activate\n")
sys.exit(ExitCode)
def displacement(Agent1=None, Agent2=None):
"""
ARGS:
RETURN:
DESCRIPTION:
Gives displacement between two agents.
DEBUG:
FUTURE:
"""
if(type(Agent1) == AGENT):
x1=Agent1.posL[0]
y1=Agent1.posL[1]
elif(type(Agent1) == list):
x1=Agent1[0]
y1=Agent1[1]
else:
exit_with_error("ERROR!!! {} not a supported type\n".format(type(Agent1)))
if(type(Agent2) == AGENT):
x2=Agent2.posL[0]
y2=Agent2.posL[1]
elif(type(Agent2) == list):
x2=Agent2[0]
y2=Agent2[1]
else:
exit_with_error("ERROR!!! {} not a supported type\n".format(type(Agent2)))
return np.sqrt( (x1-x2)**2 + (y1-y2)**2)
def move_agent(Agent=None, AgentL=None, InfectDist=None, Quarantine=None, DeltaT=None):
"""
ARGS:
Agent : AGENT, The AGENT whose trajectory we are computing
AgentL : List of AGENTs, Used for avoiding quarantined AGENTs
InfectDist : Float, Infectiousness distance. Used as radius around
quarantined, infected AGENTs
Quarantine : Boolean, do we quarantine infected agents?
DeltaT : Time interval.
RETURN:
DESCRIPTION:
Moves agent. Applies implied boundary conditions [0,0] -> [1,1]
If quarantine option used, I do an approximation of reality.
Reasons :
1. Trying to avoid multiple infected individuals gets logically complicated
to code.
2. Avoiding the bounds [0,0] -> [1,1] and trying to avoid an infected
individual adds yet another level of logical complexity.
Solution :
1. I only avoid the first infected individual that I encounter and then I test
the bounds.
DEBUG:
FUTURE:
"""
xi= Agent.posL[0]
yi= Agent.posL[1]
vx= Agent.vL[0]
vy= Agent.vL[1]
v = np.sqrt(vx**2 + vy**2)
xf= Agent.vL[0] * DeltaT + Agent.posL[0]
yf= Agent.vL[1] * DeltaT + Agent.posL[1]
r = InfectDist # Radius about quarantined individual
# Check if quarantined agent nearby.
# 1. Only consider 1st quarantine encountered b/c it could be potentially very
# challenging to solve for preventing a susceptible from completely avoiding _all_
# quarantined agents.
# 2.
# 3.
# Quarantined agents can't move.
if(Agent.quarantine == True and Agent.immune == True):
return
if(Quarantine == True and Agent.infected == False):
# displacement, Agent final - initial
dfi = np.sqrt((xf-xi)**2 + (yf-yi)**2)
# Get line function, y = mx + b
m = (yf-yi)/(xf-xi) # Slope of line
b = yf - m*xf # Pick a point on the line, solve for intercept
for agent in AgentL:
# Must be quarantined to avoid
if(agent.quarantine == False):
continue
xc = agent.posL[0]
yc = agent.posL[1]
# displ, quarantined - Agent final
dfq = np.sqrt((xc-xf)**2 + (yc-yf)**2)
# displ, quarantined - Agent initial
diq = np.sqrt((xc-xi)**2 + (yc-yi)**2)
# There might be a collision -
# It is possible that both dfq and diq are
# outside of radius, yet the trajectory passes through it
#### Maybe easier if we just say if dfi < 3*r and dfq < 3*r
if(dfq <= r or diq <= r): # This is gross and imprecise.
# Get circle of exclusion line, recall
# 0 = (x - xc)^2 + (y - yc)^2 - r^2
# xc,yc = x,yposition of center of circle
def f(x):
y = m*x+b
return( (x-xc)**2 + (y-yc)**2 - r**2)
### With many root solvers, it requires that f(a)*f(b) < 0. However,
### fsolve doesn't care. It just needs bounds to look
xroots = optimize.fsolve(f, [xc-r, xc+r])
# If there are two roots, which do i pick? Pick closest to Agent
if(len(xroots) == 2):
x1=xroots[0]
y1= m * x1 + b
d1=displacement(Agent,[x1,y1])
x2=xroots[1]
y2= m * x2 + b
d2=displacement(Agent,[x2,y2])
# Use 1st root b/c it is closer
if(d1<d2):
x=x1
y=y1
else:
x=x2
y=y2
elif(len(xroots) == 1):
x = xroots[0]
y = m * x + b
else:
exit_with_error("ERROR!!! I don't understand how there can "
"be more than 2 roots!\n")
rx = x-xc
ry = y-yc
rvect = [rx, ry]
# Find line perpendicular to rvect, i.e. tangent to the circle, call it 't'
# Let :
# t = a \hat(i) + b \hat(j)
# rvect = rx \hat(i) + ry \hat(j)
# Solve equation :
# t \dot rvect = 0
# (a \hat(i) + b \hat(j)) \dot (rx \hat(i) + ry \hat(j)) = 0
# a * rx + b * ry = 0
# a = -(b * ry) / rx
### Vertical line
if(rx == 0):
a = 0
b = 1
alpha = np.pi / 2 # 90deg, Angle between tangent and horizontal
### Horizontal line
elif(ry == 0):
a = 1
b = 0
alpha = 0 # 0deg, Angle between tangent and horizontal
### Everything else
else:
b = 1
a = -b * ry / rx
alpha = np.arctan(b/a) # Angle between tangent and horizontal
if(np.isnan(alpha)):
exit_with_error("ERROR!!! np.arctan({}/{}) == nan\n".format(b/a))
#if(np.isclose(np.sqrt(rvect[0]*rvect[0]+rvect[1]*rvect[1]), r) == False):
# exit_with_error("ERROR!!! I don't know how |rvect| != |r|\n")
# Now get angle between rvector and velocity vector
theta = np.arccos( (vx*rvect[0] + vy*rvect[1]) /
np.sqrt((vx**2 + vy**2)*(rvect[0]**2 + rvect[1]**2)))
# Angle of reflection w/r/t to the tangent | # 2. https://mbi.osu.edu/events/seminar-grzegorz-rempala-mathematical-models-epidemics-tracking-coronavirus-using-dynamic
#
# Future: | random_line_split |
|
simulating_agent_model_2D.py | do an approximation of reality.
Reasons :
1. Trying to avoid multiple infected individuals gets logically complicated
to code.
2. Avoiding the bounds [0,0,0] -> [1,1,1] and trying to avoid an infected
individual adds yet another level of logical complexity.
Solution :
1. I only avoid the first infected individual that I encounter and then I test
the bounds.
DEBUG:
FUTURE:
"""
xi= Agent.posL[0]
yi= Agent.posL[1]
vx= Agent.vL[0]
vy= Agent.vL[1]
v = np.sqrt(vx**2 + vy**2)
xf= Agent.vL[0] * DeltaT + Agent.posL[0]
yf= Agent.vL[1] * DeltaT + Agent.posL[1]
r = InfectDist # Radius about quarantined individual
# Check if quarantined agent nearby.
# 1. Only consider 1st quarantine encountered b/c it could be potentially very
# challenging to solve for preventing a susceptible from completely avoiding _all_
# quarantined agents.
# 2.
# 3.
# Quarantined agents can't move.
if(Agent.quarantine == True and Agent.immune == True):
return
if(Quarantine == True and Agent.infected == False):
# displacement, Agent final - initial
dfi = np.sqrt((xf-xi)**2 + (yf-yi)**2)
# Get line function, y = mx + b
m = (yf-yi)/(xf-xi) # Slope of line
b = yf - m*xf # Pick a point on the line, solve for intercept
for agent in AgentL:
# Must be quarantined to avoid
if(agent.quarantine == False):
continue
xc = agent.posL[0]
yc = agent.posL[1]
# displ, quarantined - Agent final
dfq = np.sqrt((xc-xf)**2 + (yc-yf)**2)
# displ, quarantined - Agent initial
diq = np.sqrt((xc-xi)**2 + (yc-yi)**2)
# There might be a collision -
# It is possible that both dfq and diq are
# outside of radius, yet the trajectory passes through it
#### Maybe easier if we just say if dfi < 3*r and dfq < 3*r
if(dfq <= r or diq <= r): # This is gross and imprecise.
# Get circle of exclusion line, recall
# 0 = (x - xc)^2 + (y - yc)^2 - r^2
# xc,yc = x,yposition of center of circle
def f(x):
y = m*x+b
return( (x-xc)**2 + (y-yc)**2 - r**2)
### Many root solvers require a bracketing interval with f(a)*f(b) < 0. However,
### fsolve doesn't; it only needs initial guesses to start the search from
xroots = optimize.fsolve(f, [xc-r, xc+r])
# If there are two roots, which do I pick? Pick closest to Agent
if(len(xroots) == 2):
x1=xroots[0]
y1= m * x1 + b
d1=displacement(Agent,[x1,y1])
x2=xroots[1]
y2= m * x2 + b
d2=displacement(Agent,[x2,y2])
# Use whichever root is closer to the Agent
if(d1<d2):
x=x1
y=y1
else:
x=x2
y=y2
elif(len(xroots) == 1):
x = xroots[0]
y = m * xroots[0] + b
else:
exit_with_error("ERROR!!! I don't understand how there can "
"be more than 2 roots!\n")
rx = x-xc
ry = y-yc
rvect = [rx, ry]
# Find line perpendicular to rvect, i.e. tangent to the circle, call it 't'
# Let :
# t = a \hat(i) + b \hat(j)
# rvect = rx \hat(i) + ry \hat(j)
# Solve equation :
# t \dot rvect = 0
# (a \hat(i) + b \hat(j)) \dot (rx \hat(i) + ry \hat(j)) = 0
# a * rx + b * ry = 0
# a = -(b * ry) / rx
### Vertical line
if(rx == 0):
a = 0
b = 1
alpha = np.pi / 2 # 90deg, Angle between tangent and horizontal
### Horizontal line
elif(ry == 0):
a = 1
b = 0
alpha = 0 # 0deg, Angle between tangent and horizontal
### Everything else
else:
b = 1
a = -b * ry / rx
alpha = np.arctan(b/a) # Angle between tangent and horizontal
if(np.isnan(alpha)):
exit_with_error("ERROR!!! np.arctan({}/{}) == nan\n".format(b/a))
#if(np.isclose(np.sqrt(rvect[0]*rvect[0]+rvect[1]*rvect[1]), r) == False):
# exit_with_error("ERROR!!! I don't know how |rvect| != |r|\n")
# Now get angle between rvector and velocity vector
theta = np.arccos( (vx*rvect[0] + vy*rvect[1]) /
np.sqrt((vx**2 + vy**2)*(rvect[0]**2 + rvect[1]**2)))
# Angle of reflection w/r/t to the tangent line on circle
phi = theta - np.pi / 2.0
#phi = theta
vx = v * np.sin(phi) * np.cos(alpha)
vy = v * np.cos(phi) * np.sin(alpha)
#print("{:<.5f} {:<.5f} {:<.5f}".format(vx,vy,phi))
xf = vx * DeltaT + xi
yf = vy * DeltaT + yi
Agent.vL[0] = vx
Agent.vL[1] = vy
break
#else:
# continue
# Check bounds
if(xf < 0):
xf = -1.0 * xf
Agent.vL[0] = -1.0 * Agent.vL[0]
if(yf < 0):
yf = -1.0 * yf
Agent.vL[1] = -1.0 * Agent.vL[1]
if(xf > 1.0):
d = xf - 1.0
xf = xf - d
Agent.vL[0] = -1.0 * Agent.vL[0]
if(yf > 1.0):
d = yf - 1.0
yf= yf - d
Agent.vL[1] = -1.0 * Agent.vL[1]
# Adjust Position
Agent.posL[0] = xf
Agent.posL[1] = yf
# Adjust velocity
dvx = random.uniform(-1,1)/100.0 # Want crossing time to be about 25 steps
dvy = random.uniform(-1,1)/100.0
Agent.vL[0] += dvx
Agent.vL[1] += dvy
def main():
"""
ARGS:
RETURN:
1. Creates images. Turn them into a movie using ffmpeg, e.g.
ffmpeg -framerate 4 -pattern_type glob -i 'output/*.png' -c:v libx264 out.mp4
DESCRIPTION:
DEBUG:
FUTURE:
1. Add option to fit only a specific section of data.
2. Make main loop NOT O(N^2). Maybe organize by position on a grid.
"""
# Check Python version
nArg = len(sys.argv)
# Use python 3
if(sys.version_info[0] != 3):
exit_with_error("ERROR!!! Use Python 3\n")
# Get options
if(len(sys.argv) > 1 and "-h" in sys.argv[1]):
print_help(0)
elif(nArg != 1 and nArg != 2):
print_help(1)
elif(nArg == 1):
quarantine = False
elif(nArg == 2 and sys.argv[1] == "quarantine"):
|
startTime = time.time()
print("{} \n | quarantine = True | conditional_block |
dashapp.py | _data,result.best_values['amp'],result.best_values['cen'],result.best_values['width'])
new_dict = {'peak':1,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),
'y1':y1.tolist(),'mu1':result.best_values['cen']}
elif len(peak_data[i]) == 2:
# For two peaks
peak1 = x_data[peak_data[i][0],0]
peak2 = x_data[peak_data[i][1],0]
c1 = peak1
a1 = y_data[peak_data[i][0],i]
c2 = peak2
a2 = y_data[peak_data[i][1],i]
if peak1<= 850:
w1 = 20
elif peak1 <= 900:
w1 = 15
else:
w1 = 10
if peak2<= 850:
w2 = 20
elif peak2 <= 900:
w2 = 15
else:
w2 = 10
# Fit two peaks
gmodel = Model(gauss2)
result = gmodel.fit(y_data[:,i], x=x_data[:,0], a1 = a1,c1=c1,w1=w1,a2=a2,c2=c2,w2=w2)
y1 = gaussian(x_data[:,0],result.best_values['a1'],result.best_values['c1'],result.best_values['w1'])
y2 = gaussian(x_data[:,0],result.best_values['a2'],result.best_values['c2'],result.best_values['w2'])
new_dict = {'peak':2,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),
'y1':y1.tolist(),'y2':y2.tolist(),
'mu1':result.best_values['c1'],'mu2':result.best_values['c2']}
else:
peak1 = x_data[peak_data[i][0],0]
peak2 = x_data[peak_data[i][1],0]
peak3 = x_data[peak_data[i][2],0]
c1 = peak1
a1 = y_data[peak_data[i][0],i]
c2 = peak2
a2 = y_data[peak_data[i][1],i]
c3 = peak3
a3 = y_data[peak_data[i][2],i]
if peak1<= 850:
w1 = 20
elif peak1 <= 900:
w1 = 15
else:
w1 = 10
if peak2<= 850:
w2 = 20
elif peak2 <= 900:
w2 = 15
else:
w2 = 10
if peak3<= 850:
w3 = 20
elif peak3 <= 900:
w3 = 15
else:
w3 = 10
# Fit three peaks
gmodel = Model(gauss3)
result = gmodel.fit(y_data[:,i], x=x_data[:,0], a1 = a1,c1=c1,w1=w1,a2=a2,c2=c2,w2=w2,a3=a3,c3=c3,w3=w3)
y1 = gaussian(x_data[:,0],result.best_values['a1'],result.best_values['c1'],result.best_values['w1'])
y2 = gaussian(x_data[:,0],result.best_values['a2'],result.best_values['c2'],result.best_values['w2'])
y3 = gaussian(x_data[:,0],result.best_values['a3'],result.best_values['c3'],result.best_values['w3'])
new_dict = {'peak':3,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),'y1':y1.tolist(),
'y2':y2.tolist(),'y3':y3.tolist(),
'mu1':result.best_values['c1'],'mu2':result.best_values['c2'],
'mu3':result.best_values['c3']}
peak_data[i] = new_dict
# At this point all the fitting is completed
# Write the data into a json file
new_file_name = 'fitted_data/fitted_'+excel_file[5:]+'.json'
with open(new_file_name, 'w') as outfile:
ujson.dump(peak_data, outfile)
import pickle
# # These variables are used subsequently in the code
# x_data,y_data = read_data('data/400 K LEIS 27.5.xlsx')
# print(x_data.shape,y_data.shape)
# with open('data/x_data', 'wb') as f:
# # Pickle the 'data' dictionary using the highest protocol available.
# pickle.dump(x_data, f, pickle.HIGHEST_PROTOCOL)
# with open('data/y_data', 'wb') as f:
# # Pickle the 'data' dictionary using the highest protocol available.
# pickle.dump(y_data, f, pickle.HIGHEST_PROTOCOL)
#Using pickle for data-source to speed up the loading process
with open('data/x_data', 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
x_data = pickle.load(f)
with open('data/y_data', 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
y_data = pickle.load(f)
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Define the dictionary which contains checklist for grid points
grid_points = []
for i in range(169):
grid_points.append({'label':str(i//13)+', '+str(i%13),'value':i})
# Define the dictionary which contains current directory's Excel files
dir_excel_files = []
for file in glob.glob("data/*.xlsx"):
dir_excel_files.append(file)
excel_files_dict = []
for e in dir_excel_files:
excel_files_dict.append({'label':str(e),'value':str(e)})
# Define the dictionary which contains current directory's Fitted json files
dir_fit_files = []
for file in glob.glob("fitted_data/*.json"):
dir_fit_files.append(file)
fit_files_dict = []
fit_files_dict.append({'label':'None','value':0})
for f in dir_fit_files:
fit_files_dict.append({'label':str(f),'value':str(f)})
app.layout = html.Div([
dcc.Tabs([
dcc.Tab(label='Reading Files', children=[
html.Div([
html.H3(children='List of all excel files (.xlsx) in data folder')
]),
dcc.RadioItems(id='select_excel',options=excel_files_dict, value=str(dir_excel_files[0]))
]),
dcc.Tab(label='Generate/Choose Fit', children=[
html.Div([
html.H2('Currently fit available for following files'),
html.H3(id='fit_state',children='Fit not available'),
html.Button(children='Fit now',id='fit-button',
title='Fit the chosen excel file', n_clicks=0),
]),
dcc.RadioItems(id='select_fit_file',options=fit_files_dict, value=0),
dcc.Loading(
id="loading_div",
children=[html.Div([html.Div(id="loading-output")])],
type="circle",
fullscreen=True,
)
]),
dcc.Tab(label='Visualize Data', children=[
html.Div(children=[
html.H3(id ='excel_file_name',children=""),
dcc.Checklist(id='show-fit', options=[{'label':'Show Fitted lines','value':0}]),
dcc.Checklist(id='index-grid', options=grid_points, value=[1]),
dcc.Graph(id='ternary-plot'),
html.Button('Clear All',id='clear-button',
title='Clears all the data graphs from the window', n_clicks=1,disabled=False),
dcc.Graph(id='data-graph')
])]),
dcc.Tab(label='Generate Fit', children=[
html.Div([
html.H1('Tab content 4')])])])])
# Fitting a new data file
n_fit = 0
@app.callback(
Output("loading-output", "children"),
[Input("fit-button", "n_clicks"),
Input('select_excel','value')])
def fit_new_data(fit_clicks,data_file):
global n_fit
if fit_clicks>n_fit:
n_fit = fit_clicks
fit_and_write_json(data_file)
return 'Fitting Done'
# Showing excel name on top of visualization
@app.callback(
Output('excel_file_name','children'),
[Input('select_excel','value')]
)
def select_excel_to_plot(excel_name):
return "Currently showing graph for {0}".format(excel_name[5:])
# Determine if fitting is to be done
@app.callback(
[Output('fit_state','children'),
Output('select_fit_file','value'),
Output('fit-button','disabled')], | [Input('select_excel','value')]
)
def fit_file(excel_file): | random_line_split |
|
dashapp.py | ussian(x,a2,c2,w2)
def | (x,a1,c1,w1,a2,c2,w2,a3,c3,w3):
""" For fitting three gaussian peaks """
return gaussian(x,a1,c1,w1)+gaussian(x,a2,c2,w2)+gaussian(x,a3,c3,w3)
# No need to re-run this again if data.json is already built
def fit_and_write_json(excel_file):
"""
This function reads upon the excel file data and writes json file with fitted values
"""
print(excel_file)
# These variables are used subsequently in the code
x_data,y_data = read_data(excel_file)
# Create a dictionary to store peaks for now
data = {}
height = []
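# Peak detection per spectrum: height=5000 rejects low-intensity noise,
# distance=50 enforces a minimum separation (in samples) between neighbouring peaks.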
for i in range(169):
peaks,_ = find_peaks(y_data[:,i],height=5000,distance=50)
data[i] = np.array(peaks,dtype=int).tolist()
# Currently the dictionary should look like {'1': 1, '2': 2, '3':2 ...} and so on
peak_data = data
# Iterating over all 13 X and 13 Ys
for i in range(169):
# If scipy.signal.find_peaks finds only one peak
if len(peak_data[i]) == 1:
gmodel = Model(gaussian)
peak = x_data[peak_data[i][0],0]
# Initialize appropriate signal from the peak data
# center "c1" comes from the peak data itself
c1 = peak
a1 = y_data[peak_data[i][0],i]
if peak <= 850:
w1 = 20
elif peak <= 900:
w1 = 15
else:
w1 = 10
# Fit using these initial estimates
result = gmodel.fit(y_data[:,i], x=x_data[:,0],amp=a1,cen=c1,width=w1)
y1 = gaussian(x_data[:,0],result.best_values['amp'],result.best_values['cen'],result.best_values['width'])
new_dict = {'peak':1,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),
'y1':y1.tolist(),'mu1':result.best_values['cen']}
elif len(peak_data[i]) == 2:
# For two peaks
peak1 = x_data[peak_data[i][0],0]
peak2 = x_data[peak_data[i][1],0]
c1 = peak1
a1 = y_data[peak_data[i][0],i]
c2 = peak2
a2 = y_data[peak_data[i][1],i]
if peak1<= 850:
w1 = 20
elif peak1 <= 900:
w1 = 15
else:
w1 = 10
if peak2<= 850:
w2 = 20
elif peak2 <= 900:
w2 = 15
else:
w2 = 10
# Fit two peaks
gmodel = Model(gauss2)
result = gmodel.fit(y_data[:,i], x=x_data[:,0], a1 = a1,c1=c1,w1=w1,a2=a2,c2=c2,w2=w2)
y1 = gaussian(x_data[:,0],result.best_values['a1'],result.best_values['c1'],result.best_values['w1'])
y2 = gaussian(x_data[:,0],result.best_values['a2'],result.best_values['c2'],result.best_values['w2'])
new_dict = {'peak':2,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),
'y1':y1.tolist(),'y2':y2.tolist(),
'mu1':result.best_values['c1'],'mu2':result.best_values['c2']}
else:
peak1 = x_data[peak_data[i][0],0]
peak2 = x_data[peak_data[i][1],0]
peak3 = x_data[peak_data[i][2],0]
c1 = peak1
a1 = y_data[peak_data[i][0],i]
c2 = peak2
a2 = y_data[peak_data[i][1],i]
c3 = peak3
a3 = y_data[peak_data[i][2],i]
if peak1<= 850:
w1 = 20
elif peak1 <= 900:
w1 = 15
else:
w1 = 10
if peak2<= 850:
w2 = 20
elif peak2 <= 900:
w2 = 15
else:
w2 = 10
if peak3<= 850:
w3 = 20
elif peak3 <= 900:
w3 = 15
else:
w3 = 10
# Fit three peaks
gmodel = Model(gauss3)
result = gmodel.fit(y_data[:,i], x=x_data[:,0], a1 = a1,c1=c1,w1=w1,a2=a2,c2=c2,w2=w2,a3=a3,c3=c3,w3=w3)
y1 = gaussian(x_data[:,0],result.best_values['a1'],result.best_values['c1'],result.best_values['w1'])
y2 = gaussian(x_data[:,0],result.best_values['a2'],result.best_values['c2'],result.best_values['w2'])
y3 = gaussian(x_data[:,0],result.best_values['a3'],result.best_values['c3'],result.best_values['w3'])
new_dict = {'peak':3,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),'y1':y1.tolist(),
'y2':y2.tolist(),'y3':y3.tolist(),
'mu1':result.best_values['c1'],'mu2':result.best_values['c2'],
'mu3':result.best_values['c3']}
peak_data[i] = new_dict
# At this point all the fitting is completed
# Write the data into a json file
new_file_name = 'fitted_data/fitted_'+excel_file[5:]+'.json'
with open(new_file_name, 'w') as outfile:
ujson.dump(peak_data, outfile)
import pickle
# # These variables are used subsequently in the code
# x_data,y_data = read_data('data/400 K LEIS 27.5.xlsx')
# print(x_data.shape,y_data.shape)
# with open('data/x_data', 'wb') as f:
# # Pickle the 'data' dictionary using the highest protocol available.
# pickle.dump(x_data, f, pickle.HIGHEST_PROTOCOL)
# with open('data/y_data', 'wb') as f:
# # Pickle the 'data' dictionary using the highest protocol available.
# pickle.dump(y_data, f, pickle.HIGHEST_PROTOCOL)
#Using pickle for data-source to speed up the loading process
with open('data/x_data', 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
x_data = pickle.load(f)
with open('data/y_data', 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
y_data = pickle.load(f)
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Define the dictionary which contains checklist for grid points
grid_points = []
for i in range(169):
grid_points.append({'label':str(i//13)+', '+str(i%13),'value':i})
# Define the dictionary which contains current directory's Excel files
dir_excel_files = []
for file in glob.glob("data/*.xlsx"):
dir_excel_files.append(file)
excel_files_dict = []
for e in dir_excel_files:
excel_files_dict.append({'label':str(e),'value':str(e)})
# Define the dictionary which contains current directory's Fitted json files
dir_fit_files = []
for file in glob.glob("fitted_data/*.json"):
dir_fit_files.append(file)
fit_files_dict = []
fit_files_dict.append({'label':'None','value':0})
for f in dir_fit_files:
fit_files_dict.append({'label':str(f),'value':str(f)})
app.layout = html.Div([
dcc.Tabs([
dcc.Tab(label='Reading Files', children=[
html.Div([
html.H3(children='List of all excel files (.xlsx) in data folder')
]),
dcc.RadioItems(id='select_excel',options=excel_files_dict, value=str(dir_excel_files[0]))
]),
dcc.Tab(label='Generate/Choose Fit', children=[
html.Div([
html.H2('Currently fit available for following files'),
html.H3(id='fit | gauss3 | identifier_name |
dashapp.py |
# Stack with the cumulative y built till now
else:
y = np.hstack((y, df_data.to_numpy()))
# Ideally x_array should be (481, 1), and y should be (481, 169)
return x_array, y
def gaussian(x, amp, cen, width):
"""1-d gaussian: gaussian(x, amp, cen, width)
x: independent variable
amp: amplitude/height of the curve
cen: mean or peak position
width: standard deviation/ spread of the curve
"""
return (amp / (np.sqrt(2*np.pi) * width)) * np.exp(-(x-cen)**2 / (2*width**2))
def gauss2(x,a1,c1,w1,a2,c2,w2):
""" For fitting two gaussian peaks """
return gaussian(x,a1,c1,w1)+gaussian(x,a2,c2,w2)
def gauss3(x,a1,c1,w1,a2,c2,w2,a3,c3,w3):
""" For fitting three gaussian peaks """
return gaussian(x,a1,c1,w1)+gaussian(x,a2,c2,w2)+gaussian(x,a3,c3,w3)
# No need to re-run this again if data.json is already built
def fit_and_write_json(excel_file):
"""
This function reads upon the excel file data and writes json file with fitted values
"""
print(excel_file)
# These variables are used subsequently in the code
x_data,y_data = read_data(excel_file)
# Create a dictionary to store peaks for now
data = {}
height = []
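# Peak detection per spectrum: height=5000 rejects low-intensity noise,
# distance=50 enforces a minimum separation (in samples) between neighbouring peaks.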
for i in range(169):
peaks,_ = find_peaks(y_data[:,i],height=5000,distance=50)
data[i] = np.array(peaks,dtype=int).tolist()
# Currently the dictionary should look like {'1': 1, '2': 2, '3':2 ...} and so on
peak_data = data
# Iterating over all 13 X and 13 Ys
for i in range(169):
# If scipy.signal.find_peaks finds only one peak
if len(peak_data[i]) == 1:
gmodel = Model(gaussian)
peak = x_data[peak_data[i][0],0]
# Initialize appropriate signal from the peak data
# center "c1" comes from the peak data itself
c1 = peak
a1 = y_data[peak_data[i][0],i]
if peak <= 850:
w1 = 20
elif peak <= 900:
w1 = 15
else:
w1 = 10
# Fit using these initial estimates
result = gmodel.fit(y_data[:,i], x=x_data[:,0],amp=a1,cen=c1,width=w1)
y1 = gaussian(x_data[:,0],result.best_values['amp'],result.best_values['cen'],result.best_values['width'])
new_dict = {'peak':1,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),
'y1':y1.tolist(),'mu1':result.best_values['cen']}
elif len(peak_data[i]) == 2:
# For two peaks
peak1 = x_data[peak_data[i][0],0]
peak2 = x_data[peak_data[i][1],0]
c1 = peak1
a1 = y_data[peak_data[i][0],i]
c2 = peak2
a2 = y_data[peak_data[i][1],i]
if peak1<= 850:
w1 = 20
elif peak1 <= 900:
w1 = 15
else:
w1 = 10
if peak2<= 850:
w2 = 20
elif peak2 <= 900:
w2 = 15
else:
w2 = 10
# Fit two peaks
gmodel = Model(gauss2)
result = gmodel.fit(y_data[:,i], x=x_data[:,0], a1 = a1,c1=c1,w1=w1,a2=a2,c2=c2,w2=w2)
y1 = gaussian(x_data[:,0],result.best_values['a1'],result.best_values['c1'],result.best_values['w1'])
y2 = gaussian(x_data[:,0],result.best_values['a2'],result.best_values['c2'],result.best_values['w2'])
new_dict = {'peak':2,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),
'y1':y1.tolist(),'y2':y2.tolist(),
'mu1':result.best_values['c1'],'mu2':result.best_values['c2']}
else:
peak1 = x_data[peak_data[i][0],0]
peak2 = x_data[peak_data[i][1],0]
peak3 = x_data[peak_data[i][2],0]
c1 = peak1
a1 = y_data[peak_data[i][0],i]
c2 = peak2
a2 = y_data[peak_data[i][1],i]
c3 = peak3
a3 = y_data[peak_data[i][2],i]
if peak1<= 850:
w1 = 20
elif peak1 <= 900:
w1 = 15
else:
w1 = 10
if peak2<= 850:
w2 = 20
elif peak2 <= 900:
w2 = 15
else:
w2 = 10
if peak3<= 850:
w3 = 20
elif peak3 <= 900:
w3 = 15
else:
w3 = 10
# Fit three peaks
gmodel = Model(gauss3)
result = gmodel.fit(y_data[:,i], x=x_data[:,0], a1 = a1,c1=c1,w1=w1,a2=a2,c2=c2,w2=w2,a3=a3,c3=c3,w3=w3)
y1 = gaussian(x_data[:,0],result.best_values['a1'],result.best_values['c1'],result.best_values['w1'])
y2 = gaussian(x_data[:,0],result.best_values['a2'],result.best_values['c2'],result.best_values['w2'])
y3 = gaussian(x_data[:,0],result.best_values['a3'],result.best_values['c3'],result.best_values['w3'])
new_dict = {'peak':3,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),'y1':y1.tolist(),
'y2':y2.tolist(),'y3':y3.tolist(),
'mu1':result.best_values['c1'],'mu2':result.best_values['c2'],
'mu3':result.best_values['c3']}
peak_data[i] = new_dict
# At this point all the fitting is completed
# Write the data into a json file
new_file_name = 'fitted_data/fitted_'+excel_file[5:]+'.json'
with open(new_file_name, 'w') as outfile:
ujson.dump(peak_data, outfile)
import pickle
# # These variables are used subsequently in the code
# x_data,y_data = read_data('data/400 K LEIS 27.5.xlsx')
# print(x_data.shape,y_data.shape)
# with open('data/x_data', 'wb') as f:
# # Pickle the 'data' dictionary using the highest protocol available.
# pickle.dump(x_data, f, pickle.HIGHEST_PROTOCOL)
# with open('data/y_data', 'wb') as f:
# # Pickle the 'data' dictionary using the highest protocol available.
# pickle.dump(y_data, f, pickle.HIGHEST_PROTOCOL)
#Using pickle for data-source to speed up the loading process
with open('data/x_data', 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
x_data = pickle.load(f)
with open('data/y_data', 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
y_data = pickle.load(f)
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Define the dictionary which contains checklist for grid points
grid_points = []
for i in range(169):
grid_points.append({'label':str(i//13)+', '+str(i%13),'value':i})
# Define the dictionary which contains current directory's Excel files
dir_excel_files = []
for file in glob.glob("data/*.xlsx"):
dir_excel_files.append(file)
excel_files_dict = []
for e in dir_excel_files:
excel | y = df_data.to_numpy() | conditional_block |
|
dashapp.py |
def gauss2(x,a1,c1,w1,a2,c2,w2):
""" For fitting two gaussian peaks """
return gaussian(x,a1,c1,w1)+gaussian(x,a2,c2,w2)
def gauss3(x,a1,c1,w1,a2,c2,w2,a3,c3,w3):
""" For fitting three gaussian peaks """
return gaussian(x,a1,c1,w1)+gaussian(x,a2,c2,w2)+gaussian(x,a3,c3,w3)
# No need to re-run this again if data.json is already built
def fit_and_write_json(excel_file):
"""
This function reads upon the excel file data and writes json file with fitted values
"""
print(excel_file)
# These variables are used subsequently in the code
x_data,y_data = read_data(excel_file)
# Create a dictionary to store peaks for now
data = {}
height = []
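# Peak detection per spectrum: height=5000 rejects low-intensity noise,
# distance=50 enforces a minimum separation (in samples) between neighbouring peaks.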
for i in range(169):
peaks,_ = find_peaks(y_data[:,i],height=5000,distance=50)
data[i] = np.array(peaks,dtype=int).tolist()
# Currently the dictionary should look like {'1': 1, '2': 2, '3':2 ...} and so on
peak_data = data
# Iterating over all 13 X and 13 Ys
for i in range(169):
# If scipy.signal.find_peaks finds only one peak
if len(peak_data[i]) == 1:
gmodel = Model(gaussian)
peak = x_data[peak_data[i][0],0]
# Initialize appropriate signal from the peak data
# center "c1" comes from the peak data itself
c1 = peak
a1 = y_data[peak_data[i][0],i]
if peak <= 850:
w1 = 20
elif peak <= 900:
w1 = 15
else:
w1 = 10
# Fit using these initial estimates
result = gmodel.fit(y_data[:,i], x=x_data[:,0],amp=a1,cen=c1,width=w1)
y1 = gaussian(x_data[:,0],result.best_values['amp'],result.best_values['cen'],result.best_values['width'])
new_dict = {'peak':1,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),
'y1':y1.tolist(),'mu1':result.best_values['cen']}
elif len(peak_data[i]) == 2:
# For two peaks
peak1 = x_data[peak_data[i][0],0]
peak2 = x_data[peak_data[i][1],0]
c1 = peak1
a1 = y_data[peak_data[i][0],i]
c2 = peak2
a2 = y_data[peak_data[i][1],i]
if peak1<= 850:
w1 = 20
elif peak1 <= 900:
w1 = 15
else:
w1 = 10
if peak2<= 850:
w2 = 20
elif peak2 <= 900:
w2 = 15
else:
w2 = 10
# Fit two peaks
gmodel = Model(gauss2)
result = gmodel.fit(y_data[:,i], x=x_data[:,0], a1 = a1,c1=c1,w1=w1,a2=a2,c2=c2,w2=w2)
y1 = gaussian(x_data[:,0],result.best_values['a1'],result.best_values['c1'],result.best_values['w1'])
y2 = gaussian(x_data[:,0],result.best_values['a2'],result.best_values['c2'],result.best_values['w2'])
new_dict = {'peak':2,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),
'y1':y1.tolist(),'y2':y2.tolist(),
'mu1':result.best_values['c1'],'mu2':result.best_values['c2']}
else:
peak1 = x_data[peak_data[i][0],0]
peak2 = x_data[peak_data[i][1],0]
peak3 = x_data[peak_data[i][2],0]
c1 = peak1
a1 = y_data[peak_data[i][0],i]
c2 = peak2
a2 = y_data[peak_data[i][1],i]
c3 = peak3
a3 = y_data[peak_data[i][2],i]
if peak1<= 850:
w1 = 20
elif peak1 <= 900:
w1 = 15
else:
w1 = 10
if peak2<= 850:
w2 = 20
elif peak2 <= 900:
w2 = 15
else:
w2 = 10
if peak3<= 850:
w3 = 20
elif peak3 <= 900:
w3 = 15
else:
w3 = 10
# Fit three peaks
gmodel = Model(gauss3)
result = gmodel.fit(y_data[:,i], x=x_data[:,0], a1 = a1,c1=c1,w1=w1,a2=a2,c2=c2,w2=w2,a3=a3,c3=c3,w3=w3)
y1 = gaussian(x_data[:,0],result.best_values['a1'],result.best_values['c1'],result.best_values['w1'])
y2 = gaussian(x_data[:,0],result.best_values['a2'],result.best_values['c2'],result.best_values['w2'])
y3 = gaussian(x_data[:,0],result.best_values['a3'],result.best_values['c3'],result.best_values['w3'])
new_dict = {'peak':3,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),'y1':y1.tolist(),
'y2':y2.tolist(),'y3':y3.tolist(),
'mu1':result.best_values['c1'],'mu2':result.best_values['c2'],
'mu3':result.best_values['c3']}
peak_data[i] = new_dict
# At this point all the fitting is completed
# Write the data into a json file
new_file_name = 'fitted_data/fitted_'+excel_file[5:]+'.json'
with open(new_file_name, 'w') as outfile:
ujson.dump(peak_data, outfile)
import pickle
# # These variables are used subsequently in the code
# x_data,y_data = read_data('data/400 K LEIS 27.5.xlsx')
# print(x_data.shape,y_data.shape)
# with open('data/x_data', 'wb') as f:
# # Pickle the 'data' dictionary using the highest protocol available.
# pickle.dump(x_data, f, pickle.HIGHEST_PROTOCOL)
# with open('data/y_data', 'wb') as f:
# # Pickle the 'data' dictionary using the highest protocol available.
# pickle.dump(y_data, f, pickle.HIGHEST_PROTOCOL)
#Using pickle for data-source to speed up the loading process
with open('data/x_data', 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
x_data = pickle.load(f)
with open('data/y_data', 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
y_data = pickle.load(f)
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Define the dictionary which contains checklist for grid points
grid_points = []
for i in range(169):
grid_points.append({'label':str(i//13)+', '+str(i%13),'value':i})
# Define the dictionary which contains current directory's Excel files
dir_excel_files = []
for file in glob.glob("data/*.xlsx"):
dir_excel_files.append(file)
excel_files_dict = []
for e in dir_excel_files:
excel_files_dict.append({'label':str(e),'value':str(e)})
# Define the dictionary which contains current directory's Fitted json files
dir_fit_files = []
for file in glob.glob("fitted_data/*.json"):
dir_fit_files.append(file)
fit_files_dict = []
fit_files_dict.append({'label':'None','value':0})
for f in dir_fit_files:
fit_files_dict.append({' | """1-d gaussian: gaussian(x, amp, cen, width)
x: independent variable
amp: amplitude/height of the curve
cen: mean or peak position
width: standard deviation/ spread of the curve
"""
return (amp / (np.sqrt(2*np.pi) * width)) * np.exp(-(x-cen)**2 / (2*width**2)) | identifier_body |
|
warm_start.py | 2) * th2d ** 2 - 2 * m2 * l1 * lc2 * sin(th2) * th2d * th1d
h2 = m2 * l1 * lc2 * sin(th2) * th1d ** 2
H = np.array([[h1], [h2]])
phi1 = (m1 * lc1 + m2 * l1) * g * cos(th1) + m2 * lc2 * g * cos(th1 + th2)
phi2 = m2 * lc2 * g * cos(th1 + th2)
PHI = np.array([[phi1], [phi2]])
Bl = np.linalg.inv(M) @ TAU
Blin = np.array([[0, 0], [0, 0], [0, Bl[0].item()], [0, Bl[1].item()]])
DPHI = np.array([[-g * (m1 * lc1 + m2 * l1 + m2 * lc2), -m2 * lc2 * g], [-m2 * lc2 * g, -m2 * lc2 * g]])
Al = -np.linalg.inv(M) @ DPHI
Alin = np.array([[0, 0, 1, 0], [0, 0, 0, 1], [Al[0, 0], Al[0, 1], 0, 0], [Al[1, 0], Al[1, 1], 0, 0]])
Ctr = ctrb(Alin, Blin)
assert np.linalg.matrix_rank(Ctr) == 4
K, S, E = lqr(Alin, Blin, Q, R)
k = np.array(K[1, :])
print(k)
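# LQR state feedback about the upright equilibrium gs = [pi/2, 0, 0, 0];
# k is the row of the gain matrix K for the actuated (second) input.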
def control(q):
gs = np.array([pi / 2, 0, 0, 0])
return -k.dot(q - gs)
def reward_fn(s, a):
reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])
done = reward < 2
return reward, done
def do_rollout(args): |
for i in range(env.num_steps):
actions = np.clip(np.asarray(control(obs)), -max_torque, max_torque)
obs, reward, done, _ = env.step(actions)
local_reward_hist[i, :] = np.copy(reward)
if done:
break
return local_reward_hist, i
# %%b
start = time.time()
config = {"init_state": [0, 0, 0, 0],
"max_torque": max_torque,
"init_state_weights": [0, 0, 0, 0],
"max_t" : 2.5,
"dt": dt,
"m2": m2,
"m1": m1,
"l1": l1,
"lc1": lc1,
"lc2": lc2,
"i1": I1,
"i2": I2,
"integrator" : integrator,
"reward_fn": reward_fn,
"act_hold": 1
}
env = gym.make('su_acrobot-v0', **config)
num_trials = 200000
reward_hist = np.zeros((num_trials, env.num_steps, 1))
X = np.zeros((num_trials, 4), dtype=np.float32)
Y = np.zeros((num_trials, 1), dtype=np.float32)
th1_min = pi / 2 - .5
th1_max = pi / 2 + .5
th2_min = -1
th2_max = 1
th1dot_min = -5
th1dot_max = 5
th2dot_min = -10
th2dot_max = 10
samples = np.random.random_sample((int(num_trials/2), 4))
samples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min - th1dot_max, th2dot_min - th2dot_max])
samples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])
total_steps = 0
pool = Pool() # defaults to number of available CPU's
for i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(num_trials/2))))):
rews, steps = res
reward_hist[i, :, :] = rews
total_steps += steps
X[i, :] = samples[i, :]
Y[i] = sum(rews) > env.num_steps*3 - 10
th1_min = 0
th1_max = 2*pi
th2_min = -pi
th2_max = pi
th1dot_min = -10
th1dot_max = 10
th2dot_min = -30
th2dot_max = 30
samples = np.random.random_sample((int(num_trials/2), 4))
samples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min - th1dot_max, th2dot_min - th2dot_max])
samples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])
total_steps = 0
for i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(num_trials/2), int(num_trials))))):
rews, steps = res
reward_hist[i, :, :] = rews
total_steps += steps
X[i+int(num_trials/2), :] = samples[i, :]
Y[i+int(num_trials/2)] = sum(rews) > env.num_steps*3 - 5
print(time.time() - start)
# %%
from seagul.nn import MLP, fit_model
import torch
net = MLP(4, 1, 2, 32) # output_activation=torch.nn.Softmax)
Y0 = np.ones((num_trials, 1), dtype=np.float32)
w = 1e-2
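# Successful (positive) samples are rare, so upweight them in the loss:
# pos_weight ~ (total samples / positive samples), scaled down by w.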
class_weight = torch.tensor(Y.shape[0]/sum(Y)*w, dtype=torch.float32)
loss_hist = fit_model(net, X, Y, 50, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss(pos_weight=class_weight))
#loss_hist = fit_model(net, X, Y, 50, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss())
# loss_hist = fit_model(net, X, Y, 100, batch_size=2048)
# loss_hist = fit_model(net, X, Y0, 5, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss(pos_weight=class_weight))
plt.close()
plt.plot(loss_hist)
plt.show()
# %%
n_thdot = 1
n_th = 1000
th1_vals = np.linspace(0, 2*pi, n_th)
th2_vals = np.linspace(-pi, pi, n_th)
th1dot_vals = np.linspace(-10, 10, n_th)
th2dot_vals = np.linspace(-30, 30, n_th)
sig = torch.nn.Sigmoid()
coords = np.zeros((n_th, n_th, 4), dtype=np.float32)
from itertools import product
start = time.time()
for i, j in product(range(n_th), range(n_th)):
coords[j, i, :] = np.array([th1_vals[i], th2_vals[j], 0, 0])
preds = sig(net(coords.reshape(-1, 4)).reshape(n_th, n_th).detach())
end = time.time()
print(end - start)
fig, ax = plt.subplots(n_thdot, n_thdot, figsize=(8, 8))
# generate 2 2d grids for the x & y bounds
x, y = np.meshgrid(th1_vals, th2_vals)
z = preds
# x and y are bounds, so z should be the value *inside* those bounds.
# Therefore, remove the last value from the z array.
z = z[:-1, :-1]
z_min, z_max = 0, np.abs(z).max()
c = ax.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
ax.set_title('Theta')
ax.set_xlabel('Th1')
ax.set_ylabel('Th2')
# set the limits of the plot to the limits of the data
ax.axis([x.min(), x.max(), y.min(), y.max()])
fig.colorbar(c, ax=ax)
plt.show()
coords = np.zeros((n_th, n_th, 4), dtype=np.float32)
start = time.time()
for i, j in product(range(n_th), range(n_th)):
coords[j, i, :] = np.array([pi/2, 0, th1dot_vals[i], th2dot_vals[j]])
preds = sig(net | x, trial_num = args
th1, th2, dth1, dth2 = x
np.random.seed(trial_num)
local_reward_hist = np.ones((env.num_steps, 1)) * -1
obs = env.reset(init_vec=[th1, th2, dth1, dth2]) | random_line_split |
warm_start.py | , 0, 0, 1], [Al[0, 0], Al[0, 1], 0, 0], [Al[1, 0], Al[1, 1], 0, 0]])
Ctr = ctrb(Alin, Blin)
assert np.linalg.matrix_rank(Ctr) == 4
K, S, E = lqr(Alin, Blin, Q, R)
k = np.array(K[1, :])
print(k)
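# LQR state feedback about the upright equilibrium gs = [pi/2, 0, 0, 0];
# k is the row of the gain matrix K for the actuated (second) input.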
def control(q):
gs = np.array([pi / 2, 0, 0, 0])
return -k.dot(q - gs)
def reward_fn(s, a):
reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])
done = reward < 2
return reward, done
def do_rollout(args):
x, trial_num = args
th1, th2, dth1, dth2 = x
np.random.seed(trial_num)
local_reward_hist = np.ones((env.num_steps, 1)) * -1
obs = env.reset(init_vec=[th1, th2, dth1, dth2])
for i in range(env.num_steps):
actions = np.clip(np.asarray(control(obs)), -max_torque, max_torque)
obs, reward, done, _ = env.step(actions)
local_reward_hist[i, :] = np.copy(reward)
if done:
break
return local_reward_hist, i
# %%b
start = time.time()
config = {"init_state": [0, 0, 0, 0],
"max_torque": max_torque,
"init_state_weights": [0, 0, 0, 0],
"max_t" : 2.5,
"dt": dt,
"m2": m2,
"m1": m1,
"l1": l1,
"lc1": lc1,
"lc2": lc2,
"i1": I1,
"i2": I2,
"integrator" : integrator,
"reward_fn": reward_fn,
"act_hold": 1
}
env = gym.make('su_acrobot-v0', **config)
num_trials = 200000
reward_hist = np.zeros((num_trials, env.num_steps, 1))
X = np.zeros((num_trials, 4), dtype=np.float32)
Y = np.zeros((num_trials, 1), dtype=np.float32)
th1_min = pi / 2 - .5
th1_max = pi / 2 + .5
th2_min = -1
th2_max = 1
th1dot_min = -5
th1dot_max = 5
th2dot_min = -10
th2dot_max = 10
samples = np.random.random_sample((int(num_trials/2), 4))
samples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min - th1dot_max, th2dot_min - th2dot_max])
samples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])
total_steps = 0
pool = Pool() # defaults to number of available CPU's
for i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(num_trials/2))))):
rews, steps = res
reward_hist[i, :, :] = rews
total_steps += steps
X[i, :] = samples[i, :]
Y[i] = sum(rews) > env.num_steps*3 - 10
th1_min = 0
th1_max = 2*pi
th2_min = -pi
th2_max = pi
th1dot_min = -10
th1dot_max = 10
th2dot_min = -30
th2dot_max = 30
samples = np.random.random_sample((int(num_trials/2), 4))
samples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min - th1dot_max, th2dot_min - th2dot_max])
samples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])
total_steps = 0
for i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(num_trials/2), int(num_trials))))):
rews, steps = res
reward_hist[i, :, :] = rews
total_steps += steps
X[i+int(num_trials/2), :] = samples[i, :]
Y[i+int(num_trials/2)] = sum(rews) > env.num_steps*3 - 5
print(time.time() - start)
# %%
from seagul.nn import MLP, fit_model
import torch
net = MLP(4, 1, 2, 32) # output_activation=torch.nn.Softmax)
Y0 = np.ones((num_trials, 1), dtype=np.float32)
w = 1e-2
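# Successful (positive) samples are rare, so upweight them in the loss:
# pos_weight ~ (total samples / positive samples), scaled down by w.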
class_weight = torch.tensor(Y.shape[0]/sum(Y)*w, dtype=torch.float32)
loss_hist = fit_model(net, X, Y, 50, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss(pos_weight=class_weight))
#loss_hist = fit_model(net, X, Y, 50, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss())
# loss_hist = fit_model(net, X, Y, 100, batch_size=2048)
# loss_hist = fit_model(net, X, Y0, 5, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss(pos_weight=class_weight))
plt.close()
plt.plot(loss_hist)
plt.show()
# %%
n_thdot = 1
n_th = 1000
th1_vals = np.linspace(0, 2*pi, n_th)
th2_vals = np.linspace(-pi, pi, n_th)
th1dot_vals = np.linspace(-10, 10, n_th)
th2dot_vals = np.linspace(-30, 30, n_th)
sig = torch.nn.Sigmoid()
coords = np.zeros((n_th, n_th, 4), dtype=np.float32)
from itertools import product
start = time.time()
for i, j in product(range(n_th), range(n_th)):
coords[j, i, :] = np.array([th1_vals[i], th2_vals[j], 0, 0])
preds = sig(net(coords.reshape(-1, 4)).reshape(n_th, n_th).detach())
end = time.time()
print(end - start)
fig, ax = plt.subplots(n_thdot, n_thdot, figsize=(8, 8))
# generate 2 2d grids for the x & y bounds
x, y = np.meshgrid(th1_vals, th2_vals)
z = preds
# x and y are bounds, so z should be the value *inside* those bounds.
# Therefore, remove the last value from the z array.
z = z[:-1, :-1]
z_min, z_max = 0, np.abs(z).max()
c = ax.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
ax.set_title('Theta')
ax.set_xlabel('Th1')
ax.set_ylabel('Th2')
# set the limits of the plot to the limits of the data
ax.axis([x.min(), x.max(), y.min(), y.max()])
fig.colorbar(c, ax=ax)
plt.show()
coords = np.zeros((n_th, n_th, 4), dtype=np.float32)
start = time.time()
for i, j in product(range(n_th), range(n_th)):
coords[j, i, :] = np.array([pi/2, 0, th1dot_vals[i], th2dot_vals[j]])
preds = sig(net(coords.reshape(-1, 4)).reshape(n_th, n_th).detach())
end = time.time()
print(end - start)
fig, ax = plt.subplots(n_thdot, n_thdot, figsize=(8, 8))
# generate 2 2d grids for the x & y bounds
x, y = np.meshgrid(th1dot_vals, th2dot_vals)
z = preds
# x and y are bounds, so z should be the value *inside* those bounds.
# Therefore, remove the last value from the z array.
z = z[:-1, :-1]
z_min, z_max = 0, np.abs(z).max()
c = ax.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
ax.set_title('DTheta')
ax.set_xlabel('dth1')
ax.set_ylabel('dth2')
# set the limits of the plot to the limits of the data
ax.axis([x.min(), x.max(), y.min(), y.max()])
fig.colorbar(c, ax=ax)
plt.show()
# %%
torch.set_default_dtype(torch.float32)
def reward_fn(s, a):
| reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])
return reward, False | identifier_body |
|
warm_start.py | 2) * th2d ** 2 - 2 * m2 * l1 * lc2 * sin(th2) * th2d * th1d
h2 = m2 * l1 * lc2 * sin(th2) * th1d ** 2
H = np.array([[h1], [h2]])
phi1 = (m1 * lc1 + m2 * l1) * g * cos(th1) + m2 * lc2 * g * cos(th1 + th2)
phi2 = m2 * lc2 * g * cos(th1 + th2)
PHI = np.array([[phi1], [phi2]])
Bl = np.linalg.inv(M) @ TAU
Blin = np.array([[0, 0], [0, 0], [0, Bl[0].item()], [0, Bl[1].item()]])
DPHI = np.array([[-g * (m1 * lc1 + m2 * l1 + m2 * lc2), -m2 * lc2 * g], [-m2 * lc2 * g, -m2 * lc2 * g]])
Al = -np.linalg.inv(M) @ DPHI
Alin = np.array([[0, 0, 1, 0], [0, 0, 0, 1], [Al[0, 0], Al[0, 1], 0, 0], [Al[1, 0], Al[1, 1], 0, 0]])
Ctr = ctrb(Alin, Blin)
assert np.linalg.matrix_rank(Ctr) == 4
K, S, E = lqr(Alin, Blin, Q, R)
k = np.array(K[1, :])
print(k)
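# LQR state feedback about the upright equilibrium gs = [pi/2, 0, 0, 0];
# k is the row of the gain matrix K for the actuated (second) input.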
def control(q):
gs = np.array([pi / 2, 0, 0, 0])
return -k.dot(q - gs)
def reward_fn(s, a):
reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])
done = reward < 2
return reward, done
def do_rollout(args):
x, trial_num = args
th1, th2, dth1, dth2 = x
np.random.seed(trial_num)
local_reward_hist = np.ones((env.num_steps, 1)) * -1
obs = env.reset(init_vec=[th1, th2, dth1, dth2])
for i in range(env.num_steps):
actions = np.clip(np.asarray(control(obs)), -max_torque, max_torque)
obs, reward, done, _ = env.step(actions)
local_reward_hist[i, :] = np.copy(reward)
if done:
break
return local_reward_hist, i
# %%b
start = time.time()
config = {"init_state": [0, 0, 0, 0],
"max_torque": max_torque,
"init_state_weights": [0, 0, 0, 0],
"max_t" : 2.5,
"dt": dt,
"m2": m2,
"m1": m1,
"l1": l1,
"lc1": lc1,
"lc2": lc2,
"i1": I1,
"i2": I2,
"integrator" : integrator,
"reward_fn": reward_fn,
"act_hold": 1
}
env = gym.make('su_acrobot-v0', **config)
num_trials = 200000
reward_hist = np.zeros((num_trials, env.num_steps, 1))
X = np.zeros((num_trials, 4), dtype=np.float32)
Y = np.zeros((num_trials, 1), dtype=np.float32)
th1_min = pi / 2 - .5
th1_max = pi / 2 + .5
th2_min = -1
th2_max = 1
th1dot_min = -5
th1dot_max = 5
th2dot_min = -10
th2dot_max = 10
samples = np.random.random_sample((int(num_trials/2), 4))
samples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min - th1dot_max, th2dot_min - th2dot_max])
samples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])
total_steps = 0
pool = Pool() # defaults to number of available CPU's
for i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(num_trials/2))))):
rews, steps = res
reward_hist[i, :, :] = rews
total_steps += steps
X[i, :] = samples[i, :]
Y[i] = sum(rews) > env.num_steps*3 - 10
th1_min = 0
th1_max = 2*pi
th2_min = -pi
th2_max = pi
th1dot_min = -10
th1dot_max = 10
th2dot_min = -30
th2dot_max = 30
samples = np.random.random_sample((int(num_trials/2), 4))
samples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min - th1dot_max, th2dot_min - th2dot_max])
samples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])
total_steps = 0
for i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(num_trials/2), int(num_trials))))):
rews, steps = res
reward_hist[i, :, :] = rews
total_steps += steps
X[i+int(num_trials/2), :] = samples[i, :]
Y[i+int(num_trials/2)] = sum(rews) > env.num_steps*3 - 5
print(time.time() - start)
# %%
from seagul.nn import MLP, fit_model
import torch
net = MLP(4, 1, 2, 32) # output_activation=torch.nn.Softmax)
Y0 = np.ones((num_trials, 1), dtype=np.float32)
w = 1e-2
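# Successful (positive) samples are rare, so upweight them in the loss:
# pos_weight ~ (total samples / positive samples), scaled down by w.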
class_weight = torch.tensor(Y.shape[0]/sum(Y)*w, dtype=torch.float32)
loss_hist = fit_model(net, X, Y, 50, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss(pos_weight=class_weight))
#loss_hist = fit_model(net, X, Y, 50, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss())
# loss_hist = fit_model(net, X, Y, 100, batch_size=2048)
# loss_hist = fit_model(net, X, Y0, 5, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss(pos_weight=class_weight))
plt.close()
plt.plot(loss_hist)
plt.show()
# %%
n_thdot = 1
n_th = 1000
th1_vals = np.linspace(0, 2*pi, n_th)
th2_vals = np.linspace(-pi, pi, n_th)
th1dot_vals = np.linspace(-10, 10, n_th)
th2dot_vals = np.linspace(-30, 30, n_th)
sig = torch.nn.Sigmoid()
coords = np.zeros((n_th, n_th, 4), dtype=np.float32)
from itertools import product
start = time.time()
for i, j in product(range(n_th), range(n_th)):
coords[j, i, :] = np.array([th1_vals[i], th2_vals[j], 0, 0])
preds = sig(net(coords.reshape(-1, 4)).reshape(n_th, n_th).detach())
end = time.time()
print(end - start)
fig, ax = plt.subplots(n_thdot, n_thdot, figsize=(8, 8))
# generate 2 2d grids for the x & y bounds
x, y = np.meshgrid(th1_vals, th2_vals)
z = preds
# x and y are bounds, so z should be the value *inside* those bounds.
# Therefore, remove the last value from the z array.
z = z[:-1, :-1]
z_min, z_max = 0, np.abs(z).max()
c = ax.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
ax.set_title('Theta')
ax.set_xlabel('Th1')
ax.set_ylabel('Th2')
# set the limits of the plot to the limits of the data
ax.axis([x.min(), x.max(), y.min(), y.max()])
fig.colorbar(c, ax=ax)
plt.show()
coords = np.zeros((n_th, n_th, 4), dtype=np.float32)
start = time.time()
for i, j in product(range(n_th), range(n_th)):
|
preds = sig(net | coords[j, i, :] = np.array([pi/2, 0, th1dot_vals[i], th2dot_vals[j]]) | conditional_block |
warm_start.py | 2) * th2d ** 2 - 2 * m2 * l1 * lc2 * sin(th2) * th2d * th1d
h2 = m2 * l1 * lc2 * sin(th2) * th1d ** 2
H = np.array([[h1], [h2]])
phi1 = (m1 * lc1 + m2 * l1) * g * cos(th1) + m2 * lc2 * g * cos(th1 + th2)
phi2 = m2 * lc2 * g * cos(th1 + th2)
PHI = np.array([[phi1], [phi2]])
Bl = np.linalg.inv(M) @ TAU
Blin = np.array([[0, 0], [0, 0], [0, Bl[0].item()], [0, Bl[1].item()]])
DPHI = np.array([[-g * (m1 * lc1 + m2 * l1 + m2 * lc2), -m2 * lc2 * g], [-m2 * lc2 * g, -m2 * lc2 * g]])
Al = -np.linalg.inv(M) @ DPHI
Alin = np.array([[0, 0, 1, 0], [0, 0, 0, 1], [Al[0, 0], Al[0, 1], 0, 0], [Al[1, 0], Al[1, 1], 0, 0]])
Ctr = ctrb(Alin, Blin)
assert np.linalg.matrix_rank(Ctr) == 4
K, S, E = lqr(Alin, Blin, Q, R)
k = np.array(K[1, :])
print(k)
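# LQR state feedback about the upright equilibrium gs = [pi/2, 0, 0, 0];
# k is the row of the gain matrix K for the actuated (second) input.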
def | (q):
gs = np.array([pi / 2, 0, 0, 0])
return -k.dot(q - gs)
def reward_fn(s, a):
reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])
done = reward < 2
return reward, done
def do_rollout(args):
x, trial_num = args
th1, th2, dth1, dth2 = x
np.random.seed(trial_num)
local_reward_hist = np.ones((env.num_steps, 1)) * -1
obs = env.reset(init_vec=[th1, th2, dth1, dth2])
for i in range(env.num_steps):
actions = np.clip(np.asarray(control(obs)), -max_torque, max_torque)
obs, reward, done, _ = env.step(actions)
local_reward_hist[i, :] = np.copy(reward)
if done:
break
return local_reward_hist, i
# %%b
start = time.time()
config = {"init_state": [0, 0, 0, 0],
"max_torque": max_torque,
"init_state_weights": [0, 0, 0, 0],
"max_t" : 2.5,
"dt": dt,
"m2": m2,
"m1": m1,
"l1": l1,
"lc1": lc1,
"lc2": lc2,
"i1": I1,
"i2": I2,
"integrator" : integrator,
"reward_fn": reward_fn,
"act_hold": 1
}
env = gym.make('su_acrobot-v0', **config)
num_trials = 200000
reward_hist = np.zeros((num_trials, env.num_steps, 1))
X = np.zeros((num_trials, 4), dtype=np.float32)
Y = np.zeros((num_trials, 1), dtype=np.float32)
th1_min = pi / 2 - .5
th1_max = pi / 2 + .5
th2_min = -1
th2_max = 1
th1dot_min = -5
th1dot_max = 5
th2dot_min = -10
th2dot_max = 10
samples = np.random.random_sample((int(num_trials/2), 4))
samples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min - th1dot_max, th2dot_min - th2dot_max])
samples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])
total_steps = 0
pool = Pool() # defaults to number of available CPU's
for i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(num_trials/2))))):
rews, steps = res
reward_hist[i, :, :] = rews
total_steps += steps
X[i, :] = samples[i, :]
Y[i] = sum(rews) > env.num_steps*3 - 10
th1_min = 0
th1_max = 2*pi
th2_min = -pi
th2_max = pi
th1dot_min = -10
th1dot_max = 10
th2dot_min = -30
th2dot_max = 30
samples = np.random.random_sample((int(num_trials/2), 4))
samples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min - th1dot_max, th2dot_min - th2dot_max])
samples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])
total_steps = 0
for i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(num_trials/2), int(num_trials))))):
rews, steps = res
reward_hist[i, :, :] = rews
total_steps += steps
X[i+int(num_trials/2), :] = samples[i, :]
Y[i+int(num_trials/2)] = sum(rews) > env.num_steps*3 - 5
print(time.time() - start)
# %%
from seagul.nn import MLP, fit_model
import torch
net = MLP(4, 1, 2, 32) # output_activation=torch.nn.Softmax)
Y0 = np.ones((num_trials, 1), dtype=np.float32)
w = 1e-2
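# Successful (positive) samples are rare, so upweight them in the loss:
# pos_weight ~ (total samples / positive samples), scaled down by w.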
class_weight = torch.tensor(Y.shape[0]/sum(Y)*w, dtype=torch.float32)
loss_hist = fit_model(net, X, Y, 50, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss(pos_weight=class_weight))
#loss_hist = fit_model(net, X, Y, 50, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss())
# loss_hist = fit_model(net, X, Y, 100, batch_size=2048)
# loss_hist = fit_model(net, X, Y0, 5, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss(pos_weight=class_weight))
plt.close()
plt.plot(loss_hist)
plt.show()
# %%
n_thdot = 1
n_th = 1000
th1_vals = np.linspace(0, 2*pi, n_th)
th2_vals = np.linspace(-pi, pi, n_th)
th1dot_vals = np.linspace(-10, 10, n_th)
th2dot_vals = np.linspace(-30, 30, n_th)
sig = torch.nn.Sigmoid()
coords = np.zeros((n_th, n_th, 4), dtype=np.float32)
from itertools import product
start = time.time()
for i, j in product(range(n_th), range(n_th)):
coords[j, i, :] = np.array([th1_vals[i], th2_vals[j], 0, 0])
preds = sig(net(coords.reshape(-1, 4)).reshape(n_th, n_th).detach())
end = time.time()
print(end - start)
fig, ax = plt.subplots(n_thdot, n_thdot, figsize=(8, 8))
# generate 2 2d grids for the x & y bounds
x, y = np.meshgrid(th1_vals, th2_vals)
z = preds
# x and y are bounds, so z should be the value *inside* those bounds.
# Therefore, remove the last value from the z array.
z = z[:-1, :-1]
z_min, z_max = 0, np.abs(z).max()
c = ax.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
ax.set_title('Theta')
ax.set_xlabel('Th1')
ax.set_ylabel('Th2')
# set the limits of the plot to the limits of the data
ax.axis([x.min(), x.max(), y.min(), y.max()])
fig.colorbar(c, ax=ax)
plt.show()
coords = np.zeros((n_th, n_th, 4), dtype=np.float32)
start = time.time()
for i, j in product(range(n_th), range(n_th)):
coords[j, i, :] = np.array([pi/2, 0, th1dot_vals[i], th2dot_vals[j]])
preds = sig(net | control | identifier_name |
fetch.rs | Map;
use hyper::{Body, Client, Method, Request, StatusCode};
use hyper_tls::HttpsConnector;
use std::io;
use std::slice;
use crate::metrics::*;
use floating_duration::TimeAsFloat;
use http::uri::Scheme;
use std::time;
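// Shared HTTP(S) client: built lazily once, reused for every outbound fetch,
// and driven by the shared EVENT_LOOP executor.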
lazy_static! {
static ref HTTP_CLIENT: Client<HttpsConnector<HttpConnector>, Body> = {
Client::builder()
.executor(EVENT_LOOP.0.clone())
.build(HttpsConnector::new(4).unwrap())
};
}
pub fn op_fetch(rt: &mut Runtime, base: &msg::Base, raw: fly_buf) -> Box<Op> | let host = if let Some(port) = http_uri.port_part() {
format!("{}:{}", host_str, port.as_str())
} else {
let port = if let Some(scheme) = http_uri.scheme_part() {
if scheme == &Scheme::HTTPS {
"443"
} else {
"80"
}
} else {
"80"
};
format!("{}:{}", host_str, port)
};
FETCH_HTTP_REQUESTS_TOTAL
.with_label_values(&[rt.name.as_str(), rt.version.as_str(), host.as_str()])
.inc();
let method = match msg.method() {
msg::HttpMethod::Get => Method::GET,
msg::HttpMethod::Head => Method::HEAD,
msg::HttpMethod::Post => Method::POST,
msg::HttpMethod::Put => Method::PUT,
msg::HttpMethod::Patch => Method::PATCH,
msg::HttpMethod::Delete => Method::DELETE,
msg::HttpMethod::Connect => Method::CONNECT,
msg::HttpMethod::Options => Method::OPTIONS,
msg::HttpMethod::Trace => Method::TRACE,
};
let msg_headers = msg.headers().unwrap();
let mut headers = HeaderMap::new();
for i in 0..msg_headers.len() {
let h = msg_headers.get(i);
trace!("header: {} => {}", h.key().unwrap(), h.value().unwrap());
headers.insert(
HeaderName::from_bytes(h.key().unwrap().as_bytes()).unwrap(),
h.value().unwrap().parse().unwrap(),
);
}
let has_body = msg.has_body();
trace!("HAS BODY? {}", has_body);
let req_body = if has_body {
if raw.data_len > 0 {
trace!("STATIC BODY!");
Body::from(unsafe { slice::from_raw_parts(raw.data_ptr, raw.data_len) }.to_vec())
} else {
trace!("STREAMING BODY");
let (sender, recver) = mpsc::unbounded::<Vec<u8>>();
{
rt.streams.lock().unwrap().insert(req_id, sender);
}
Body::wrap_stream(recver.map_err(|_| std::sync::mpsc::RecvError {}))
}
} else {
Body::empty()
};
// let req_body = Body::empty();
let mut req = Request::new(req_body);
{
*req.uri_mut() = http_uri.clone();
*req.method_mut() = method;
*req.headers_mut() = headers;
}
let (p, c) = oneshot::channel::<FlyResult<JsHttpResponse>>();
let rt_name = rt.name.clone();
let rt_version = rt.version.clone();
let method = req.method().clone();
rt.spawn(future::lazy(move || {
let timer = time::Instant::now();
HTTP_CLIENT.request(req).then(move |reserr| {
debug!("got http response (or error)");
if let Err(err) = reserr {
if p.send(Err(err.into())).is_err() {
error!("error sending error for http response :/");
}
return Ok(());
}
let res = reserr.unwrap(); // should be safe.
FETCH_HEADERS_DURATION
.with_label_values(&[
rt_name.as_str(),
rt_version.as_str(),
method.as_str(),
host.as_str(),
res.status().as_str(),
])
.observe(timer.elapsed().as_fractional_secs());
let (parts, body) = res.into_parts();
let mut stream_rx: Option<JsBody> = None;
let has_body = !body.is_end_stream();
if has_body {
stream_rx = Some(JsBody::BoxedStream(Box::new(
body.map_err(|e| format!("{}", e).into()).map(move |chunk| {
let bytes = chunk.into_bytes();
DATA_IN_TOTAL
.with_label_values(&[rt_name.as_str(), rt_version.as_str(), "fetch"])
.inc_by(bytes.len() as i64);
bytes.to_vec()
}),
)));
}
if p.send(Ok(JsHttpResponse {
headers: parts.headers,
status: parts.status,
body: stream_rx,
}))
.is_err()
{
error!("error sending fetch http response");
return Ok(());
}
debug!("done with http request");
Ok(())
})
}));
let fut = c
.map_err(|e| {
FlyError::from(io::Error::new(
io::ErrorKind::Other,
format!("err getting response from oneshot: {}", e).as_str(),
))
})
.and_then(move |reserr: FlyResult<JsHttpResponse>| {
if let Err(err) = reserr {
return Err(err);
}
let res = reserr.unwrap();
let builder = &mut FlatBufferBuilder::new();
let headers: Vec<_> = res
.headers
.iter()
.map(|(key, value)| {
let key = builder.create_string(key.as_str());
let value = builder.create_string(value.to_str().unwrap());
msg::HttpHeader::create(
builder,
&msg::HttpHeaderArgs {
key: Some(key),
value: Some(value),
..Default::default()
},
)
})
.collect();
let res_headers = builder.create_vector(&headers);
let msg = msg::FetchHttpResponse::create(
builder,
&msg::FetchHttpResponseArgs {
id: req_id,
headers: Some(res_headers),
status: res.status.as_u16(),
has_body: res.body.is_some(),
..Default::default()
},
);
if let Some(stream) = res.body {
send_body_stream(ptr, req_id, stream);
}
Ok(serialize_response(
cmd_id,
builder,
msg::BaseArgs {
msg: Some(msg.as_union_value()),
msg_type: msg::Any::FetchHttpResponse,
..Default::default()
},
))
});
Box::new(fut)
}
pub fn op_http_response(rt: &mut Runtime, base: &msg::Base, raw: fly_buf) -> Box<Op> {
debug!("handling http response");
let msg = base.msg_as_http_response().unwrap();
let req_id = msg.id();
let status = match StatusCode::from_u16(msg.status()) {
Ok(s) => s,
Err(e) => return odd_future(format!("{}", e).into()),
};
let mut headers = HeaderMap::new();
if let Some(msg_headers) = msg.headers() {
for i in 0..msg_headers.len() {
let h = msg_headers.get(i);
headers.insert(
HeaderName::from_bytes(h.key().unwrap().as_bytes()).unwrap(),
h.value().unwrap().parse().unwrap(),
);
}
}
let mut body: Option<JsBody> = None;
let has_body = msg.has_body();
if has_body {
if raw.data_len == 0 {
debug!("http response will have a streaming body");
let (sender, recver) = mpsc::unbounded::<Vec<u8>>();
{
let mut streams = rt.streams.lock().unwrap();
streams.insert(req_id, sender);
}
body = Some(JsBody::Stream(recver));
} else {
debug!("http response will have a static body");
body = Some(JsBody::Static(
unsafe { slice::from_raw_parts(raw.data_ptr, raw.data_len) }.to_vec(),
));
}
}
let mut responses = rt.responses.lock().unwrap();
match responses.remove(&req_id) {
Some(sender) => {
if sender
.send(JsHttpResponse {
headers: headers,
status: status,
body: body,
})
.is_err()
{
return odd_future("error sending http response".to_string().into());
}
}
None => return odd_future("no response receiver!".to_string().into()),
};
ok_future(None)
}
fn file_request(rt: &mut Runtime, cmd_id: u32, url: &str) -> Box<Op> {
let req_id = get_next_stream_id();
let path: | {
let cmd_id = base.cmd_id();
let msg = base.msg_as_http_request().unwrap();
let url = msg.url().unwrap();
if url.starts_with("file://") {
return file_request(rt, cmd_id, url);
}
let ptr = rt.ptr;
let req_id = msg.id();
let http_uri: hyper::Uri = match url.parse() {
Ok(u) => u,
Err(e) => return odd_future(format!("{}", e).into()),
};
// for the metrics
let host_str = http_uri.host().unwrap_or("unknown"); | identifier_body |
fetch.rs | HeaderMap;
use hyper::{Body, Client, Method, Request, StatusCode};
use hyper_tls::HttpsConnector;
use std::io;
use std::slice;
use crate::metrics::*;
use floating_duration::TimeAsFloat;
use http::uri::Scheme;
use std::time;
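// Shared HTTP(S) client: built lazily once, reused for every outbound fetch,
// and driven by the shared EVENT_LOOP executor.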
lazy_static! {
static ref HTTP_CLIENT: Client<HttpsConnector<HttpConnector>, Body> = {
Client::builder()
.executor(EVENT_LOOP.0.clone())
.build(HttpsConnector::new(4).unwrap())
};
}
pub fn op_fetch(rt: &mut Runtime, base: &msg::Base, raw: fly_buf) -> Box<Op> {
let cmd_id = base.cmd_id();
let msg = base.msg_as_http_request().unwrap();
let url = msg.url().unwrap();
if url.starts_with("file://") {
return file_request(rt, cmd_id, url);
}
let ptr = rt.ptr;
let req_id = msg.id();
let http_uri: hyper::Uri = match url.parse() {
Ok(u) => u,
Err(e) => return odd_future(format!("{}", e).into()),
};
// for the metrics
let host_str = http_uri.host().unwrap_or("unknown");
let host = if let Some(port) = http_uri.port_part() {
format!("{}:{}", host_str, port.as_str())
} else {
let port = if let Some(scheme) = http_uri.scheme_part() {
if scheme == &Scheme::HTTPS {
"443"
} else {
"80"
}
} else {
"80"
};
format!("{}:{}", host_str, port)
};
FETCH_HTTP_REQUESTS_TOTAL
.with_label_values(&[rt.name.as_str(), rt.version.as_str(), host.as_str()])
.inc();
let method = match msg.method() {
msg::HttpMethod::Get => Method::GET,
msg::HttpMethod::Head => Method::HEAD,
msg::HttpMethod::Post => Method::POST,
msg::HttpMethod::Put => Method::PUT,
msg::HttpMethod::Patch => Method::PATCH,
msg::HttpMethod::Delete => Method::DELETE,
msg::HttpMethod::Connect => Method::CONNECT,
msg::HttpMethod::Options => Method::OPTIONS,
msg::HttpMethod::Trace => Method::TRACE,
};
let msg_headers = msg.headers().unwrap();
let mut headers = HeaderMap::new();
for i in 0..msg_headers.len() {
let h = msg_headers.get(i);
trace!("header: {} => {}", h.key().unwrap(), h.value().unwrap());
headers.insert(
HeaderName::from_bytes(h.key().unwrap().as_bytes()).unwrap(),
h.value().unwrap().parse().unwrap(),
);
}
let has_body = msg.has_body();
trace!("HAS BODY? {}", has_body);
let req_body = if has_body {
if raw.data_len > 0 {
trace!("STATIC BODY!");
Body::from(unsafe { slice::from_raw_parts(raw.data_ptr, raw.data_len) }.to_vec())
} else {
trace!("STREAMING BODY");
let (sender, recver) = mpsc::unbounded::<Vec<u8>>();
{
rt.streams.lock().unwrap().insert(req_id, sender);
}
Body::wrap_stream(recver.map_err(|_| std::sync::mpsc::RecvError {}))
}
} else {
Body::empty()
};
// let req_body = Body::empty();
let mut req = Request::new(req_body);
{
*req.uri_mut() = http_uri.clone();
*req.method_mut() = method;
*req.headers_mut() = headers;
}
let (p, c) = oneshot::channel::<FlyResult<JsHttpResponse>>();
let rt_name = rt.name.clone();
let rt_version = rt.version.clone();
let method = req.method().clone();
rt.spawn(future::lazy(move || {
let timer = time::Instant::now();
HTTP_CLIENT.request(req).then(move |reserr| {
debug!("got http response (or error)");
if let Err(err) = reserr {
if p.send(Err(err.into())).is_err() {
error!("error sending error for http response :/");
}
return Ok(());
}
let res = reserr.unwrap(); // should be safe.
FETCH_HEADERS_DURATION
.with_label_values(&[
rt_name.as_str(),
rt_version.as_str(),
method.as_str(),
host.as_str(),
res.status().as_str(),
])
.observe(timer.elapsed().as_fractional_secs());
let (parts, body) = res.into_parts();
let mut stream_rx: Option<JsBody> = None;
let has_body = !body.is_end_stream();
if has_body {
stream_rx = Some(JsBody::BoxedStream(Box::new(
body.map_err(|e| format!("{}", e).into()).map(move |chunk| {
let bytes = chunk.into_bytes();
DATA_IN_TOTAL
.with_label_values(&[rt_name.as_str(), rt_version.as_str(), "fetch"])
.inc_by(bytes.len() as i64);
bytes.to_vec()
}),
)));
}
if p.send(Ok(JsHttpResponse {
headers: parts.headers,
status: parts.status,
body: stream_rx,
}))
.is_err()
{
error!("error sending fetch http response");
return Ok(());
}
debug!("done with http request");
Ok(())
})
}));
let fut = c
.map_err(|e| {
FlyError::from(io::Error::new(
io::ErrorKind::Other,
format!("err getting response from oneshot: {}", e).as_str(),
))
})
.and_then(move |reserr: FlyResult<JsHttpResponse>| {
if let Err(err) = reserr {
return Err(err);
}
let res = reserr.unwrap();
let builder = &mut FlatBufferBuilder::new();
let headers: Vec<_> = res
.headers
.iter()
.map(|(key, value)| {
let key = builder.create_string(key.as_str());
let value = builder.create_string(value.to_str().unwrap());
msg::HttpHeader::create(
builder,
&msg::HttpHeaderArgs {
key: Some(key),
value: Some(value),
..Default::default()
},
)
})
.collect();
let res_headers = builder.create_vector(&headers);
let msg = msg::FetchHttpResponse::create(
builder,
&msg::FetchHttpResponseArgs {
id: req_id, | status: res.status.as_u16(),
has_body: res.body.is_some(),
..Default::default()
},
);
if let Some(stream) = res.body {
send_body_stream(ptr, req_id, stream);
}
Ok(serialize_response(
cmd_id,
builder,
msg::BaseArgs {
msg: Some(msg.as_union_value()),
msg_type: msg::Any::FetchHttpResponse,
..Default::default()
},
))
});
Box::new(fut)
}
pub fn op_http_response(rt: &mut Runtime, base: &msg::Base, raw: fly_buf) -> Box<Op> {
debug!("handling http response");
let msg = base.msg_as_http_response().unwrap();
let req_id = msg.id();
let status = match StatusCode::from_u16(msg.status()) {
Ok(s) => s,
Err(e) => return odd_future(format!("{}", e).into()),
};
let mut headers = HeaderMap::new();
if let Some(msg_headers) = msg.headers() {
for i in 0..msg_headers.len() {
let h = msg_headers.get(i);
headers.insert(
HeaderName::from_bytes(h.key().unwrap().as_bytes()).unwrap(),
h.value().unwrap().parse().unwrap(),
);
}
}
let mut body: Option<JsBody> = None;
let has_body = msg.has_body();
if has_body {
if raw.data_len == 0 {
debug!("http response will have a streaming body");
let (sender, recver) = mpsc::unbounded::<Vec<u8>>();
{
let mut streams = rt.streams.lock().unwrap();
streams.insert(req_id, sender);
}
body = Some(JsBody::Stream(recver));
} else {
debug!("http response will have a static body");
body = Some(JsBody::Static(
unsafe { slice::from_raw_parts(raw.data_ptr, raw.data_len) }.to_vec(),
));
}
}
let mut responses = rt.responses.lock().unwrap();
match responses.remove(&req_id) {
Some(sender) => {
if sender
.send(JsHttpResponse {
headers: headers,
status: status,
body: body,
})
.is_err()
{
return odd_future("error sending http response".to_string().into());
}
}
None => return odd_future("no response receiver!".to_string().into()),
};
ok_future(None)
}
fn file_request(rt: &mut Runtime, cmd_id: u32, url: &str) -> Box<Op> {
let req_id = get_next_stream_id();
let path: | headers: Some(res_headers), | random_line_split |
fetch.rs | Map;
use hyper::{Body, Client, Method, Request, StatusCode};
use hyper_tls::HttpsConnector;
use std::io;
use std::slice;
use crate::metrics::*;
use floating_duration::TimeAsFloat;
use http::uri::Scheme;
use std::time;
lazy_static! {
static ref HTTP_CLIENT: Client<HttpsConnector<HttpConnector>, Body> = {
Client::builder()
.executor(EVENT_LOOP.0.clone())
.build(HttpsConnector::new(4).unwrap())
};
}
pub fn op_fetch(rt: &mut Runtime, base: &msg::Base, raw: fly_buf) -> Box<Op> {
let cmd_id = base.cmd_id();
let msg = base.msg_as_http_request().unwrap();
let url = msg.url().unwrap();
if url.starts_with("file://") {
return file_request(rt, cmd_id, url);
}
let ptr = rt.ptr;
let req_id = msg.id();
let http_uri: hyper::Uri = match url.parse() {
Ok(u) => u,
Err(e) => return odd_future(format!("{}", e).into()),
};
// for the metrics
let host_str = http_uri.host().unwrap_or("unknown");
let host = if let Some(port) = http_uri.port_part() {
format!("{}:{}", host_str, port.as_str())
} else {
let port = if let Some(scheme) = http_uri.scheme_part() {
if scheme == &Scheme::HTTPS {
"443"
} else {
"80"
}
} else {
"80"
};
format!("{}:{}", host_str, port)
};
FETCH_HTTP_REQUESTS_TOTAL
.with_label_values(&[rt.name.as_str(), rt.version.as_str(), host.as_str()])
.inc();
let method = match msg.method() {
msg::HttpMethod::Get => Method::GET,
msg::HttpMethod::Head => Method::HEAD,
msg::HttpMethod::Post => Method::POST,
msg::HttpMethod::Put => Method::PUT,
msg::HttpMethod::Patch => Method::PATCH,
msg::HttpMethod::Delete => Method::DELETE,
msg::HttpMethod::Connect => Method::CONNECT,
msg::HttpMethod::Options => Method::OPTIONS,
msg::HttpMethod::Trace => Method::TRACE,
};
let msg_headers = msg.headers().unwrap();
let mut headers = HeaderMap::new();
for i in 0..msg_headers.len() {
let h = msg_headers.get(i);
trace!("header: {} => {}", h.key().unwrap(), h.value().unwrap());
headers.insert(
HeaderName::from_bytes(h.key().unwrap().as_bytes()).unwrap(),
h.value().unwrap().parse().unwrap(),
);
}
let has_body = msg.has_body();
trace!("HAS BODY? {}", has_body);
let req_body = if has_body {
if raw.data_len > 0 {
trace!("STATIC BODY!");
Body::from(unsafe { slice::from_raw_parts(raw.data_ptr, raw.data_len) }.to_vec())
} else {
trace!("STREAMING BODY");
let (sender, recver) = mpsc::unbounded::<Vec<u8>>();
{
rt.streams.lock().unwrap().insert(req_id, sender);
}
Body::wrap_stream(recver.map_err(|_| std::sync::mpsc::RecvError {}))
}
} else {
Body::empty()
};
// let req_body = Body::empty();
let mut req = Request::new(req_body);
{
*req.uri_mut() = http_uri.clone();
*req.method_mut() = method;
*req.headers_mut() = headers;
}
let (p, c) = oneshot::channel::<FlyResult<JsHttpResponse>>();
let rt_name = rt.name.clone();
let rt_version = rt.version.clone();
let method = req.method().clone();
rt.spawn(future::lazy(move || {
let timer = time::Instant::now();
HTTP_CLIENT.request(req).then(move |reserr| {
debug!("got http response (or error)");
if let Err(err) = reserr {
if p.send(Err(err.into())).is_err() {
error!("error sending error for http response :/");
}
return Ok(());
}
let res = reserr.unwrap(); // should be safe.
FETCH_HEADERS_DURATION
.with_label_values(&[
rt_name.as_str(),
rt_version.as_str(),
method.as_str(),
host.as_str(),
res.status().as_str(),
])
.observe(timer.elapsed().as_fractional_secs());
let (parts, body) = res.into_parts();
let mut stream_rx: Option<JsBody> = None;
let has_body = !body.is_end_stream();
if has_body {
stream_rx = Some(JsBody::BoxedStream(Box::new(
body.map_err(|e| format!("{}", e).into()).map(move |chunk| {
let bytes = chunk.into_bytes();
DATA_IN_TOTAL
.with_label_values(&[rt_name.as_str(), rt_version.as_str(), "fetch"])
.inc_by(bytes.len() as i64);
bytes.to_vec()
}),
)));
}
if p.send(Ok(JsHttpResponse {
headers: parts.headers,
status: parts.status,
body: stream_rx,
}))
.is_err()
{
error!("error sending fetch http response");
return Ok(());
}
debug!("done with http request");
Ok(())
})
}));
let fut = c
.map_err(|e| {
FlyError::from(io::Error::new(
io::ErrorKind::Other,
format!("err getting response from oneshot: {}", e).as_str(),
))
})
.and_then(move |reserr: FlyResult<JsHttpResponse>| {
if let Err(err) = reserr {
return Err(err);
}
let res = reserr.unwrap();
let builder = &mut FlatBufferBuilder::new();
let headers: Vec<_> = res
.headers
.iter()
.map(|(key, value)| {
let key = builder.create_string(key.as_str());
let value = builder.create_string(value.to_str().unwrap());
msg::HttpHeader::create(
builder,
&msg::HttpHeaderArgs {
key: Some(key),
value: Some(value),
..Default::default()
},
)
})
.collect();
let res_headers = builder.create_vector(&headers);
let msg = msg::FetchHttpResponse::create(
builder,
&msg::FetchHttpResponseArgs {
id: req_id,
headers: Some(res_headers),
status: res.status.as_u16(),
has_body: res.body.is_some(),
..Default::default()
},
);
if let Some(stream) = res.body {
send_body_stream(ptr, req_id, stream);
}
Ok(serialize_response(
cmd_id,
builder,
msg::BaseArgs {
msg: Some(msg.as_union_value()),
msg_type: msg::Any::FetchHttpResponse,
..Default::default()
},
))
});
Box::new(fut)
}
pub fn | (rt: &mut Runtime, base: &msg::Base, raw: fly_buf) -> Box<Op> {
debug!("handling http response");
let msg = base.msg_as_http_response().unwrap();
let req_id = msg.id();
let status = match StatusCode::from_u16(msg.status()) {
Ok(s) => s,
Err(e) => return odd_future(format!("{}", e).into()),
};
let mut headers = HeaderMap::new();
if let Some(msg_headers) = msg.headers() {
for i in 0..msg_headers.len() {
let h = msg_headers.get(i);
headers.insert(
HeaderName::from_bytes(h.key().unwrap().as_bytes()).unwrap(),
h.value().unwrap().parse().unwrap(),
);
}
}
let mut body: Option<JsBody> = None;
let has_body = msg.has_body();
if has_body {
if raw.data_len == 0 {
debug!("http response will have a streaming body");
let (sender, recver) = mpsc::unbounded::<Vec<u8>>();
{
let mut streams = rt.streams.lock().unwrap();
streams.insert(req_id, sender);
}
body = Some(JsBody::Stream(recver));
} else {
debug!("http response will have a static body");
body = Some(JsBody::Static(
unsafe { slice::from_raw_parts(raw.data_ptr, raw.data_len) }.to_vec(),
));
}
}
let mut responses = rt.responses.lock().unwrap();
match responses.remove(&req_id) {
Some(sender) => {
if sender
.send(JsHttpResponse {
headers: headers,
status: status,
body: body,
})
.is_err()
{
return odd_future("error sending http response".to_string().into());
}
}
None => return odd_future("no response receiver!".to_string().into()),
};
ok_future(None)
}
fn file_request(rt: &mut Runtime, cmd_id: u32, url: &str) -> Box<Op> {
let req_id = get_next_stream_id();
let path: | op_http_response | identifier_name |
treeFactory.js | '15'},{
name: '欧洲游',
id: '11',
children: [{
name: '挪威游',
id: '111'
}, {
name: '冰岛游',
id: '112'
}]
}, {
name: '北美游',
id: '12',
children: [{
name: '美国游',
id: '121',
children: [{
name: '加州游',
id: '1211'
}, {
name: '纽约游',
id: '1212'
}]
}, {
name: '加拿大游',
id: '122'
}]
}, {
name: '亚洲游',
id: '13',
children: [{
name: '日本游',
id: '131'
}, {
name: '韩国游',
id: '132'
}, {
name: '新马泰游',
id: '133'
}]
}]
}
const R = 30
const width = 960
const height = 960
const fontSize = 12
const dx = 10
const dy = 160
function separation(a, b) {
if (a.parent === b.parent) {
return ((2*R + 1)*(a.value + b.value)/2) / dx
} else {
return 2
}
}
class Tree {
constructor(selector) {
this.data = null;
this.root = null;
this.container = selector ? document.querySelector(selector) : document.querySelector('body');
this.tree = d3.tree().nodeSize([dx, dy]).separation(separation)
this.diagonal = d3.linkHorizontal().x(d => d.y).y(d => d.x)
this.eventEmitter = Object.create(null);
// 事件绑定
this.handleNodeClick = this.handleNodeClick.bind(this)
this.handleEditClick = this.handleEditClick.bind(this)
this.handleAddClick = this.handleAddClick.bind(this)
this.handleDeleteClick = this.handleDeleteClick.bind(this)
this.handleToggleCollapseClick = this.handleToggleCollapseClick.bind(this)
}
/************************************************************
* eventEmitter
* type: edit, add, delete
************************************************************/
on(type, fn) {
if (this.eventEmitter[type]) {
this.eventEmitter[type].push(fn)
} else {
this.eventEmitter[type] = [fn];
}
}
clearEventEmitter() {
this.eventEmitter = null;
}
/************************************************************
* 初始化
************************************************************ | ata;
this.initRoot(data);
this.initSVG();
if (this.root) {
this.render(this.root)
}
}
/************************************************************
* 初始化root
************************************************************/
initRoot() {
const root = d3.hierarchy(this.data).count();
root.x0 = dy / 2;
root.y0 = 0;
root.descendants().forEach((d, i) => {
d.id = d.data.id ? d.data.id : i;
d._children = d.children;
});
this.root = root;
}
/************************************************************
* 初始化SVG
************************************************************/
initSVG() {
this.svg = d3.create("svg")
.style("font", "12px sans-serif")
.style("user-select", "none");
this.gLink = this.svg.append("g")
.attr("fill", "none")
.attr("stroke", "#555")
.attr("stroke-opacity", 0.4)
.attr("stroke-width", 1.5);
this.gNode = this.svg.append("g")
.attr("cursor", "pointer");
this.container.appendChild(this.svg.node());
}
/************************************************************
* 更新树
************************************************************/
update(data, d) {
this.data = data;
this.initRoot()
this.render(d)
}
render(source) {
const root = this.root;
// Compute the new tree layout.
this.tree(root);
this.updateTransition();
// Update the nodes…
this.updateNodes(source);
// Update the links…
this.updateLinks(source);
// Stash the old positions for transition.
root.eachBefore(d => {
d.x0 = d.x;
d.y0 = d.y;
});
}
updateTransition() {
const svg = this.svg;
const duration = d3.event && d3.event.altKey ? 2500 : 250;
const height = this.height(); // svg height
const svgTranslateX = 2 * R;
const svgTranslateY = height/2 - R;
this.transition = this.svg.transition()
.duration(duration)
.attr("height", height)
.attr("viewBox", [-svgTranslateX, -svgTranslateY, width, height])
.tween("resize", window.ResizeObserver ? null : () => () => svg.dispatch("toggle"));
}
updateNodes(source) {
const transition = this.transition;
const nodes = this.root.descendants().reverse();
const node = this.gNode.selectAll("g")
.data(nodes, d => d.id);
// if showToolkit is true, show toolkit
const withToolKit = node.filter(d => d.showToolkit === true)
this.addOperationButtonToNode(withToolKit)
// Enter any new nodes at the parent's previous position.
const nodeEnter = node.enter().append("g")
.attr("transform", d => `translate(${source.y0},${source.x0})`)
.attr("fill-opacity", 0)
.attr("stroke-opacity", 0)
.on("click", this.handleNodeClick);
nodeEnter.append("circle")
.attr("r", R)
.attr("fill", d => d._children ? "#555" : "#999");
nodeEnter.append("text")
.attr("dy", "0.31em")
.attr("x", d => -(d.data.name.length * fontSize / 2))
.text(d => d.data.name)
.clone(true).lower()
.attr("stroke-linejoin", "round")
.attr("stroke-width", 3)
.attr("stroke", "white");
// Transition nodes to their new position.
const nodeUpdate = node.merge(nodeEnter).transition(transition)
.attr("transform", d => `translate(${d.y},${d.x})`)
.attr("fill-opacity", 1)
.attr("stroke-opacity", 1);
// Transition exiting nodes to the parent's new position.
const nodeExit = node.exit().transition(transition).remove()
.attr("transform", d => `translate(${source.y},${source.x})`)
.attr("fill-opacity", 0)
.attr("stroke-opacity", 0);
}
updateLinks(source) {
const transition = this.transition;
const links = this.root.links();
const link = this.gLink.selectAll("path")
.data(links, d => d.target.id);
// Enter any new links at the parent's previous position.
const linkEnter = link.enter().append("path")
.attr("d", d => {
const o = {x: source.x0, y: source.y0};
return this.diagonal({source: o, target: o});
});
// Transition links to their new position.
link.merge(linkEnter).transition(transition)
.attr("d", this.diagonal);
// Transition exiting nodes to the parent's new position.
link.exit().transition(transition).remove()
.attr("d", d => {
const o = {x: source.x, y: source.y};
return this.diagonal({source: o, target: o});
});
}
/************************************************************
* 公共函数,可提取出去
************************************************************/
height() {
if (this.root.value) {
return (2*R + dx/2) * this.root.value + 2*R
} else {
return height
}
}
/************************************************************
* 事件处理函数,可提取出去
************************************************************/
// 节点点击事件
handleNodeClick(d) {
// toggle toolkit
this.toggleToolkit(d);
this.render(d);
}
// 编辑
handleEditClick(d) {
const editFnArr = this.eventEmitter.edit;
if (editFnArr) {
editFnArr.forEach(fn => fn.call(null, d))
}
}
// 添加
handleAddClick(d) {
const addFnArr = this.eventEmitter.add;
if (addFnArr) {
addFnArr.forEach(fn => fn.call(null, d))
}
}
// 删除
handleDeleteClick(d) {
const deleteFnArr = this.eventEmitter.delete;
if (deleteFnArr) {
deleteFnArr.forEach(fn => fn.call(null, d))
}
}
// toggle collapse
handleToggleCollapseClick(d) {
this.toggleCollapse(d);
}
/************************************************************
* 数据操作,可提取出去
************************************************************/
// 展开/ | /
init(data) {
this.data = d | identifier_body |
treeFactory.js | '15'},{
name: '欧洲游',
id: '11',
children: [{
name: '挪威游',
id: '111'
}, {
name: '冰岛游',
id: '112'
}]
}, {
name: '北美游',
id: '12',
children: [{
name: '美国游',
id: '121',
children: [{
name: '加州游',
id: '1211'
}, {
name: '纽约游',
id: '1212'
}]
}, {
name: '加拿大游',
id: '122'
}]
}, {
name: '亚洲游',
id: '13',
children: [{
name: '日本游',
id: '131'
}, {
name: '韩国游',
id: '132'
}, {
name: '新马泰游',
id: '133'
}]
}]
}
const R = 30
const width = 960
const height = 960
const fontSize = 12
const dx = 10
const dy = 160
function separation(a, b) {
if (a.parent === b.parent) {
return ((2*R + 1)*(a.value + b.value)/2) / dx
} else {
return 2
}
}
class Tree {
constructor(selector) {
this.data = null;
this.root = null;
this.container = selector ? document.querySelector(selector) : document.querySelector('body');
this.tree = d3.tree().nodeSize([dx, dy]).separation(separation)
this.diagonal = d3.linkHorizontal().x(d => d.y).y(d => d.x)
this.eventEmitter = Object.create(null);
// 事件绑定
this.handleNodeClick = this.handleNodeClick.bind(this)
this.handleEditClick = this.handleEditClick.bind(this)
this.handleAddClick = this.handleAddClick.bind(this)
this.handleDeleteClick = this.handleDeleteClick.bind(this)
this.handleToggleCollapseClick = this.handleToggleCollapseClick.bind(this)
}
/************************************************************
* eventEmitter
* type: edit, add, delete
************************************************************/
on(type, fn) {
if (this.eventEmitter[type]) {
this.eventEmitter[type].push(fn)
} else {
this.eventEmitter[type] = [fn];
}
}
clearEventEmitter() {
this.eventEmitter = null;
}
/************************************************************
* 初始化
************************************************************/
init(data) {
this.data = data;
this.initRoot(data);
this.initSVG();
if (this.root) {
this.render(this.root)
}
}
/************************************************************
* 初始化root
************************************************************/
initRoot() {
const root = d3.hierarchy(this.data).count();
root.x0 = dy / 2;
root.y0 = 0;
root.descendants().forEach((d, i) => {
d.id = d.data.id ? d.data.id : i;
d._children = d.children;
});
this.root = root;
}
/************************************************************
* 初始化SVG
************************************************************/
initSVG() {
this.svg = d3.create("svg")
.style("font", "12px sans-serif")
.style("user-select", "none");
this.gLink = this.svg.append("g")
.attr("fill", "none")
.attr("stroke", "#555")
.attr("stroke-opacity", 0.4)
.attr("stroke-width", 1.5);
this.gNode = this.svg.append("g")
.attr("cursor", "pointer");
this.container.appendChild(this.svg.node());
}
/************************************************************
* 更新树
************************************************************/
update(data, d) {
this.data = data;
this.initRoot()
this.render(d)
}
render(source) {
const root = this.root;
// Compute the new tree layout.
this.tree(root);
this.updateTransition();
// Update the nodes…
this.updateNodes(source);
// Update the links…
this.updateLinks(source);
// Stash the old positions for transition.
root.eachBefore(d => {
d.x0 = d.x;
d.y0 = d.y;
});
}
updateTransition() {
const svg = this.svg;
const duration = d3.event && d3.event.altKey ? 2500 : 250;
const height = this.height(); // svg height
const svgTranslateX = 2 * R;
const svgTranslateY = height/2 - R;
this.transition = this.svg.transition()
.duration(duration)
.attr("height", height)
.attr("viewBox", [-svgTranslateX, -svgTranslateY, width, height])
.tween("resize", window.ResizeObserver ? null : () => () => svg.dispatch("toggle"));
}
updateNodes(source) {
const transition = this.transition;
const nodes = this.root.descendants().reverse();
const node = this.gNode.selectAll("g")
.data(nodes, d => d.id);
// if showToolkit is true, show toolkit
const withToolKit = node.filter(d => d.showToolkit === true)
this.addOperationButtonToNode(withToolKit)
// Enter any new nodes at the parent's previous position.
const nodeEnter = node.enter().append("g")
.attr("transform", d => `translate(${source.y0},${source.x0})`)
.attr("fill-opacity", 0)
.attr("stroke-opacity", 0)
.on("click", this.handleNodeClick);
nodeEnter.append("circle")
.attr("r", R)
.attr("fill", d => d._children ? "#555" : "#999");
nodeEnter.append("text")
.attr("dy", "0.31em")
.attr("x", d => -(d.data.name.length * fontSize / 2))
.text(d => d.data.name)
.clone(true).lower()
.attr("stroke-linejoin", "round")
.attr("stroke-width", 3)
.attr("stroke", "white");
// Transition nodes to their new position.
const nodeUpdate = node.merge(nodeEnter).transition(transition)
.attr("transform", d => `translate(${d.y},${d.x})`)
.attr("fill-opacity", 1)
.attr("stroke-opacity", 1);
// Transition exiting nodes to the parent's new position.
const nodeExit = node.exit().transition(transition).remove()
.attr("transform", d => `translate(${source.y},${source.x})`)
.attr("fill-opacity", 0)
.attr("stroke-opacity", 0);
}
updateLinks(source) {
const transition = this.transition;
const links = this.root.links();
const link = this.gLink.selectAll("path")
.data(links, d => d.target.id);
// Enter any new links at the parent's previous position.
const linkEnter = link.enter().append("path")
.attr("d", d => {
const o = {x: source.x0, y: source.y0};
return this.diagonal({source: o, target: o});
});
// Transition links to their new position.
link.merge(linkEnter).transition(transition)
.attr("d", this.diagonal);
// Transition exiting nodes to the parent's new position.
link.exit().transition(transition).remove()
.attr("d", d => {
const o = {x: source.x, y: source.y};
return this.diagonal({source: o, target: o});
});
}
/************************************************************
* 公共函数,可提取出去
************************************************************/
height() {
if (this.root.value) {
return (2*R + dx/2) * this.root.value + 2*R
} else {
return height
}
}
/************************************************************
* 事件处理函数,可提取出去
**** | **************************************************/
// 节点点击事件
handleNodeClick(d) {
// toggle toolkit
this.toggleToolkit(d);
this.render(d);
}
// 编辑
handleEditClick(d) {
const editFnArr = this.eventEmitter.edit;
if (editFnArr) {
editFnArr.forEach(fn => fn.call(null, d))
}
}
// 添加
handleAddClick(d) {
const addFnArr = this.eventEmitter.add;
if (addFnArr) {
addFnArr.forEach(fn => fn.call(null, d))
}
}
// 删除
handleDeleteClick(d) {
const deleteFnArr = this.eventEmitter.delete;
if (deleteFnArr) {
deleteFnArr.forEach(fn => fn.call(null, d))
}
}
// toggle collapse
handleToggleCollapseClick(d) {
this.toggleCollapse(d);
}
/************************************************************
* 数据操作,可提取出去
************************************************************/
// 展开/ | ****** | identifier_name |
treeFactory.js | : '15'},{
name: '欧洲游',
id: '11',
children: [{
name: '挪威游',
id: '111'
}, {
name: '冰岛游',
id: '112'
}]
}, {
name: '北美游',
id: '12',
children: [{
name: '美国游',
id: '121',
children: [{
name: '加州游',
id: '1211'
}, {
name: '纽约游',
id: '1212'
}]
}, {
name: '加拿大游',
id: '122'
}]
}, {
name: '亚洲游',
id: '13',
children: [{
name: '日本游',
id: '131'
}, {
name: '韩国游',
id: '132'
}, {
name: '新马泰游',
id: '133'
}]
}]
}
const R = 30
const width = 960
const height = 960
const fontSize = 12
const dx = 10
const dy = 160
function separation(a, b) {
if (a.parent === b.parent) {
return ((2*R + 1)*(a.value + b.value)/2) / dx
} else {
return 2
}
}
class Tree {
constructor(selector) {
this.data = null;
this.root = null;
this.container = selector ? document.querySelector(selector) : document.querySelector('body');
this.tree = d3.tree().nodeSize([dx, dy]).separation(separation)
this.diagonal = d3.linkHorizontal().x(d => d.y).y(d => d.x)
this.eventEmitter = Object.create(null);
// 事件绑定
this.handleNodeClick = this.handleNodeClick.bind(this)
this.handleEditClick = this.handleEditClick.bind(this)
this.handleAddClick = this.handleAddClick.bind(this)
this.handleDeleteClick = this.handleDeleteClick.bind(this)
this.handleToggleCollapseClick = this.handleToggleCollapseClick.bind(this)
}
/************************************************************
* eventEmitter
* type: edit, add, delete
************************************************************/
on(type, fn) {
if (this.eventEmitter[type]) {
this.eventEmitter[type].push(fn)
} else {
this.eventEmitter[type] = [fn];
}
}
clearEventEmitter() {
this.eventEmitter = null;
}
/************************************************************
* 初始化
************************************************************/
init(data) {
this.data = data;
this.initRoot(data);
this.initSVG();
if (this.root) {
this.render(this.root)
}
}
/************************************************************
* 初始化root
************************************************************/
initRoot() {
const root = d3.hierarchy(this.data).count();
root.x0 = dy / 2;
root.y0 = 0;
root.descendants().forEach((d, i) => {
d.id = d.data.id ? d.data.id : i;
d._children = d.children;
});
this.root = root;
}
/************************************************************
* 初始化SVG
************************************************************/
initSVG() {
this.svg = d3.create("svg")
.style("font", "12px sans-serif")
.style("user-select", "none");
this.gLink = this.svg.append("g")
.attr("fill", "none")
.attr("stroke", "#555")
.attr("stroke-opacity", 0.4)
.attr("stroke-width", 1.5);
this.gNode = this.svg.append("g")
.attr("cursor", "pointer");
this.container.appendChild(this.svg.node());
}
/************************************************************
* 更新树
************************************************************/
update(data, d) {
this.data = data;
this.initRoot()
this.render(d)
}
render(source) {
const root = this.root;
// Compute the new tree layout.
this.tree(root);
this.updateTransition();
// Update the nodes…
this.updateNodes(source);
// Update the links…
this.updateLinks(source);
// Stash the old positions for transition.
root.eachBefore(d => {
d.x0 = d.x;
d.y0 = d.y;
});
}
updateTransition() {
const svg = this.svg;
const duration = d3.event && d3.event.altKey ? 2500 : 250;
const height = this.height(); // svg height
const svgTranslateX = 2 * R;
const svgTranslateY = height/2 - R;
this.transition = this.svg.transition()
.duration(duration)
.attr("height", height)
.attr("viewBox", [-svgTranslateX, -svgTranslateY, width, height])
.tween("resize", window.ResizeObserver ? null : () => () => svg.dispatch("toggle"));
}
updateNodes(source) {
const transition = this.transition;
const nodes = this.root.descendants().reverse();
const node = this.gNode.selectAll("g")
.data(nodes, d => d.id);
// if showToolkit is true, show toolkit
const withToolKit = node.filter(d => d.showToolkit === true)
this.addOperationButtonToNode(withToolKit)
// Enter any new nodes at the parent's previous position.
const nodeEnter = node.enter().append("g")
.attr("transform", d => `translate(${source.y0},${source.x0})`)
.attr("fill-opacity", 0)
.attr("stroke-opacity", 0)
.on("click", this.handleNodeClick);
nodeEnter.append("circle")
.attr("r", R)
.attr("fill", d => d._children ? "#555" : "#999");
nodeEnter.append("text")
.attr("dy", "0.31em")
.attr("x", d => -(d.data.name.length * fontSize / 2))
.text(d => d.data.name)
.clone(true).lower()
.attr("stroke-linejoin", "round")
.attr("stroke-width", 3)
.attr("stroke", "white");
// Transition nodes to their new position.
const nodeUpdate = node.merge(nodeEnter).transition(transition)
.attr("transform", d => `translate(${d.y},${d.x})`)
.attr("fill-opacity", 1)
.attr("stroke-opacity", 1);
// Transition exiting nodes to the parent's new position.
const nodeExit = node.exit().transition(transition).remove()
.attr("transform", d => `translate(${source.y},${source.x})`)
.attr("fill-opacity", 0)
.attr("stroke-opacity", 0);
}
updateLinks(source) {
const transition = this.transition;
const links = this.root.links();
const link = this.gLink.selectAll("path")
.data(links, d => d.target.id);
// Enter any new links at the parent's previous position.
const linkEnter = link.enter().append("path")
.attr("d", d => {
const o = {x: source.x0, y: source.y0};
return this.diagonal({source: o, target: o});
});
// Transition links to their new position.
link.merge(linkEnter).transition(transition)
.attr("d", this.diagonal);
// Transition exiting nodes to the parent's new position.
link.exit().transition(transition).remove()
.attr("d", d => {
const o = {x: source.x, y: source.y};
return this.diagonal({source: o, target: o});
});
}
/************************************************************
* 公共函数,可提取出去
************************************************************/
height() {
if (this.root.value) {
return (2*R + dx/2) * this.root.value + 2*R
} else {
return height | ************************************************************/
// 节点点击事件
handleNodeClick(d) {
// toggle toolkit
this.toggleToolkit(d);
this.render(d);
}
// 编辑
handleEditClick(d) {
const editFnArr = this.eventEmitter.edit;
if (editFnArr) {
editFnArr.forEach(fn => fn.call(null, d))
}
}
// 添加
handleAddClick(d) {
const addFnArr = this.eventEmitter.add;
if (addFnArr) {
addFnArr.forEach(fn => fn.call(null, d))
}
}
// 删除
handleDeleteClick(d) {
const deleteFnArr = this.eventEmitter.delete;
if (deleteFnArr) {
deleteFnArr.forEach(fn => fn.call(null, d))
}
}
// toggle collapse
handleToggleCollapseClick(d) {
this.toggleCollapse(d);
}
/************************************************************
* 数据操作,可提取出去
************************************************************/
// 展开/收起 | }
}
/************************************************************
* 事件处理函数,可提取出去 | random_line_split |
treeFactory.js | '15'},{
name: '欧洲游',
id: '11',
children: [{
name: '挪威游',
id: '111'
}, {
name: '冰岛游',
id: '112'
}]
}, {
name: '北美游',
id: '12',
children: [{
name: '美国游',
id: '121',
children: [{
name: '加州游',
id: '1211'
}, {
name: '纽约游',
id: '1212'
}]
}, {
name: '加拿大游',
id: '122'
}]
}, {
name: '亚洲游',
id: '13',
children: [{
name: '日本游',
id: '131'
}, {
name: '韩国游',
id: '132'
}, {
name: '新马泰游',
id: '133'
}]
}]
}
const R = 30
const width = 960
const height = 960
const fontSize = 12
const dx = 10
const dy = 160
function separation(a, b) {
if (a.parent === b.parent) {
return ((2*R + 1)*(a.value + b.value)/2) / dx
} else {
return 2
}
}
class Tree {
constructor(selector) {
this.data = null;
this.root = null;
this.container = selector ? document.querySelector(selector) : document.querySelector('body');
this.tree = d3.tree().nodeSize([dx, dy]).separation(separation)
this.diagonal = d3.linkHorizontal().x(d => d.y).y(d => d.x)
this.eventEmitter = Object.create(null);
// 事件绑定
this.handleNodeClick = this.handleNodeClick.bind(this)
this.handleEditClick = this.handleEditClick.bind(this)
this.handleAddClick = this.handleAddClick.bind(this)
this.handleDeleteClick = this.handleDeleteClick.bind(this)
this.handleToggleCollapseClick = this.handleToggleCollapseClick.bind(this)
}
/************************************************************
* eventEmitter
* type: edit, add, delete
************************************************************/
on(type, fn) {
if (this.eventEmitter[type]) {
this.eventEmitter[type].push(fn)
} else {
this.eventEmitter[type] = [fn];
}
}
clearEventEmitter() {
this.eventEmitter = null;
}
/************************************************************
* 初始化
************************************************************/
init(data) {
this.data = data;
this.initRoot(data);
this.initSVG();
if (this.root) {
this.render(this.root)
}
}
/************************************************************
* 初始化root
********************************************************* | = d3.hierarchy(this.data).count();
root.x0 = dy / 2;
root.y0 = 0;
root.descendants().forEach((d, i) => {
d.id = d.data.id ? d.data.id : i;
d._children = d.children;
});
this.root = root;
}
/************************************************************
* 初始化SVG
************************************************************/
initSVG() {
this.svg = d3.create("svg")
.style("font", "12px sans-serif")
.style("user-select", "none");
this.gLink = this.svg.append("g")
.attr("fill", "none")
.attr("stroke", "#555")
.attr("stroke-opacity", 0.4)
.attr("stroke-width", 1.5);
this.gNode = this.svg.append("g")
.attr("cursor", "pointer");
this.container.appendChild(this.svg.node());
}
/************************************************************
* 更新树
************************************************************/
update(data, d) {
this.data = data;
this.initRoot()
this.render(d)
}
render(source) {
const root = this.root;
// Compute the new tree layout.
this.tree(root);
this.updateTransition();
// Update the nodes…
this.updateNodes(source);
// Update the links…
this.updateLinks(source);
// Stash the old positions for transition.
root.eachBefore(d => {
d.x0 = d.x;
d.y0 = d.y;
});
}
updateTransition() {
const svg = this.svg;
const duration = d3.event && d3.event.altKey ? 2500 : 250;
const height = this.height(); // svg height
const svgTranslateX = 2 * R;
const svgTranslateY = height/2 - R;
this.transition = this.svg.transition()
.duration(duration)
.attr("height", height)
.attr("viewBox", [-svgTranslateX, -svgTranslateY, width, height])
.tween("resize", window.ResizeObserver ? null : () => () => svg.dispatch("toggle"));
}
updateNodes(source) {
const transition = this.transition;
const nodes = this.root.descendants().reverse();
const node = this.gNode.selectAll("g")
.data(nodes, d => d.id);
// if showToolkit is true, show toolkit
const withToolKit = node.filter(d => d.showToolkit === true)
this.addOperationButtonToNode(withToolKit)
// Enter any new nodes at the parent's previous position.
const nodeEnter = node.enter().append("g")
.attr("transform", d => `translate(${source.y0},${source.x0})`)
.attr("fill-opacity", 0)
.attr("stroke-opacity", 0)
.on("click", this.handleNodeClick);
nodeEnter.append("circle")
.attr("r", R)
.attr("fill", d => d._children ? "#555" : "#999");
nodeEnter.append("text")
.attr("dy", "0.31em")
.attr("x", d => -(d.data.name.length * fontSize / 2))
.text(d => d.data.name)
.clone(true).lower()
.attr("stroke-linejoin", "round")
.attr("stroke-width", 3)
.attr("stroke", "white");
// Transition nodes to their new position.
const nodeUpdate = node.merge(nodeEnter).transition(transition)
.attr("transform", d => `translate(${d.y},${d.x})`)
.attr("fill-opacity", 1)
.attr("stroke-opacity", 1);
// Transition exiting nodes to the parent's new position.
const nodeExit = node.exit().transition(transition).remove()
.attr("transform", d => `translate(${source.y},${source.x})`)
.attr("fill-opacity", 0)
.attr("stroke-opacity", 0);
}
updateLinks(source) {
const transition = this.transition;
const links = this.root.links();
const link = this.gLink.selectAll("path")
.data(links, d => d.target.id);
// Enter any new links at the parent's previous position.
const linkEnter = link.enter().append("path")
.attr("d", d => {
const o = {x: source.x0, y: source.y0};
return this.diagonal({source: o, target: o});
});
// Transition links to their new position.
link.merge(linkEnter).transition(transition)
.attr("d", this.diagonal);
// Transition exiting nodes to the parent's new position.
link.exit().transition(transition).remove()
.attr("d", d => {
const o = {x: source.x, y: source.y};
return this.diagonal({source: o, target: o});
});
}
/************************************************************
* 公共函数,可提取出去
************************************************************/
height() {
if (this.root.value) {
return (2*R + dx/2) * this.root.value + 2*R
} else {
return height
}
}
/************************************************************
* 事件处理函数,可提取出去
************************************************************/
// 节点点击事件
handleNodeClick(d) {
// toggle toolkit
this.toggleToolkit(d);
this.render(d);
}
// 编辑
handleEditClick(d) {
const editFnArr = this.eventEmitter.edit;
if (editFnArr) {
editFnArr.forEach(fn => fn.call(null, d))
}
}
// 添加
handleAddClick(d) {
const addFnArr = this.eventEmitter.add;
if (addFnArr) {
addFnArr.forEach(fn => fn.call(null, d))
}
}
// 删除
handleDeleteClick(d) {
const deleteFnArr = this.eventEmitter.delete;
if (deleteFnArr) {
deleteFnArr.forEach(fn => fn.call(null, d))
}
}
// toggle collapse
handleToggleCollapseClick(d) {
this.toggleCollapse(d);
}
/************************************************************
* 数据操作,可提取出去
************************************************************/
// 展开/ | ***/
initRoot() {
const root | conditional_block |
SpinConfig.py | config = load(os.path.join(gameserver_dir(), 'config.json'))
def reload():
# reload config file
global config
try:
new_config = load(os.path.join(gameserver_dir(), 'config.json'))
config = new_config
except:
pass
# return identifier for this game (e.g. "mf" or "tr")
def game(override_game_id = None):
if override_game_id:
id = override_game_id
else:
id = config['game_id']
# strip off "test" suffix
if id.endswith('test'):
id = id[:-4]
return id
def game_id_long(override_game_id = None):
id = game(override_game_id=override_game_id)
return {'mf':'marsfrontier',
'mf2':'marsfrontier2',
'tr':'thunderrun',
'bfm':'battlefrontmars',
'dv':'daysofvalor',
'sg':'summonersgate',
'em':'warclanempire'}[id]
# return the path (relative to gameserver/) of the master gamedata file
def gamedata_filename(extension = '.json', locale = None, override_game_id = None):
if override_game_id:
gid = override_game_id
else:
gid = game()
return os.path.join(gameserver_dir(), '../gamedata/%s/built/gamedata-%s%s%s' % (gid, gid, ('-'+locale) if locale else '', extension))
# return the path (relative to gameserver/) to a single included gamedata source file
def gamedata_component_filename(name, override_game_id = None):
if override_game_id:
game_id = override_game_id
else:
game_id = game()
# check overlay built first
trial = os.path.join(gameserver_dir(), '../gamedata/%s/built/%s_%s' % (game_id, game_id, name))
if os.path.exists(trial): return trial
# check overlay non-built second
trial = os.path.join(gameserver_dir(), '../gamedata/%s/%s_%s' % (game_id, game_id, name))
if os.path.exists(trial): return trial
return os.path.join(gameserver_dir(), '../gamedata/%s' % name)
# return (bucket, prefix) for upcache files in S3
def upcache_s3_location(game_id):
return 'spinpunch-upcache', '%s-upcache' % game_id_long(game_id)
# return default location to look for this computer's AWS key file
def aws_key_file():
my_hostname = os.uname()[1].split('.')[0]
return os.path.join(os.getenv('HOME'), '.ssh', my_hostname+'-awssecret')
# gamedata locales to check for a specific user locale, in order of preference
def locales_to_try(locale):
if locale == 'null': return [None] # developer override
return [locale, 'en_US', None]
# misc. global tables (might want to move these to SpinUpcache later)
# COUNTRY TIERS - reflect average level of AD BID PRICES
# targeting based on http://nanigansblog.files.wordpress.com/2012/02/nanigans_facebookcountrytargeting_cpcreach3.png
# NOTE: some small countries (Bahrain, Brunei, etc) are omitted, they will be assigned to Tier 4
country_tier_map = {
'at': 1, 'dk': 1, 'fi': 1, 'gb': 1, 'nl': 1, 'no': 1, 'nz': 1, 'za': 1, 'au': 1, 'se': 1,
'ca': 2, 'us': 2,
'be': 3, 'br': 3, 'ch': 3, 'de': 3, 'es': 3, 'fr': 3, 'gr': 3, 'hk': 3, 'hu': 3, 'ie': 3, 'il': 3, 'it': 3, 'pe': 3, 'pr': 3, 'pt': 3, 'ro': 3, 'sa': 3, 'sg': 3, 'sk': 3, 've': 3,
'al': 4, 'ar': 4, 'ba': 4, 'bg': 4, 'cl': 4, 'co': 4, 'cr': 4, 'do': 4, 'dz': 4, 'eg': 4, 'ge': 4, 'gt': 4, 'hr': 4, 'id': 4, 'in': 4, 'jo': 4, 'lt': 4, 'ma': 4, 'me': 4, 'mk': 4, 'mx': 4, 'my': 4, 'ng': 4, 'pa': 4, 'ph': 4, 'pk': 4, 'pl': 4, 'rs': 4, 'sv': 4, 'th': 4, 'tn': 4, 'tr': 4, 'vn': 4,
'kr': 4, # for English-language games at least
'ae': 4, 'am': 4, 'ax': 4, 'ba': 4
}
# PRICE REGIONS - reflect average willingness to pay/price elasticity groups
price_region_map = {
'at': 'A', 'dk': 'A', 'fi': 'A', 'gb': 'A', 'nl': 'A', 'no': 'A', 'nz': 'A', 'za': 'A', 'au': 'A', 'se': 'A', 'kw': 'A', 'gg': 'A', 'im': 'A', 'qa': 'A', 'bh': 'A', 'mq': 'A',
'ca': 'B', 'us': 'B', 'is': 'B', 'ly': 'B', 'kr': 'B', 'tw': 'B',
'be': 'C', 'br': 'C', 'ch': 'C', 'de': 'C', 'es': 'C', 'fr': 'C', 'gr': 'C', 'hk': 'C', 'hu': 'C', 'ie': 'C', 'il': 'C', 'it': 'C', 'pe': 'C', 'pr': 'C', 'pt': 'C', 'ro': 'C', 'sa': 'C', 'sg': 'C', 'sk': 'C', 've': 'C',
'al': 'D', 'ar': 'D', 'ba': 'D', 'bg': 'D', 'cl': 'D', 'co': 'D', 'cr': 'D', 'do': 'D', 'dz': 'D', 'eg': 'D', 'ge': 'D', 'gt': 'D', 'hr': 'D', 'id': 'D', 'in': 'D', 'jo': 'D', 'lt': 'D', 'ma': 'D', 'me': 'D', 'mk': 'D', 'mx': 'D', 'my': 'D', 'ng': 'D', 'pa': 'D', 'ph': 'D', 'pk': 'D', 'pl': 'D', 'rs': 'D', 'sv': 'D', 'th': 'D', 'tn': 'D', 'tr': 'D', 'vn': 'D',
'ae': 'D', 'am': 'D', 'ax': 'D', 'ba': 'D', 'lb': 'D', 'np': 'D'
}
# FACAEBOOK GAME FAN PAGES - dictionary of fan page IDs for strategy games, to track user "likes"
FACEBOOK_GAME_FAN_PAGES_VERSION = 2 # increment this number each time a change is made, to avoid trusting stale data in upcache
FACEBOOK_GAME_FAN_PAGES = {
'mars_frontier':'235938246460875',
'thunder_run':'141835099310946',
'war_star_empire':'633274570056000',
'thunder_run_days_of_valor':'294870984023668',
'battlefront_mars':'1436033100000042',
'war_commander':'166402620131249',
'battle_pirates':'323061097715783',
'total_domination':'330939280268735',
'edgeworld':'329450857071583',
'vega_conflict':'349144321859865',
'light_nova':'153463478093125',
'wasteland_empires':'15 | e = os.getenv('SPIN_GAMESERVER')
if e: return e
return '../gameserver'
# global, used by code that imports SpinConfig | random_line_split |
|
SpinConfig.py |
# global, used by code that imports SpinConfig
config = load(os.path.join(gameserver_dir(), 'config.json'))
def reload():
# reload config file
global config
try:
new_config = load(os.path.join(gameserver_dir(), 'config.json'))
config = new_config
except:
pass
# return identifier for this game (e.g. "mf" or "tr")
def game(override_game_id = None):
if override_game_id:
id = override_game_id
else:
id = config['game_id']
# strip off "test" suffix
if id.endswith('test'):
id = id[:-4]
return id
def game_id_long(override_game_id = None):
id = game(override_game_id=override_game_id)
return {'mf':'marsfrontier',
'mf2':'marsfrontier2',
'tr':'thunderrun',
'bfm':'battlefrontmars',
'dv':'daysofvalor',
'sg':'summonersgate',
'em':'warclanempire'}[id]
# return the path (relative to gameserver/) of the master gamedata file
def gamedata_filename(extension = '.json', locale = None, override_game_id = None):
if override_game_id:
gid = override_game_id
else:
gid = game()
return os.path.join(gameserver_dir(), '../gamedata/%s/built/gamedata-%s%s%s' % (gid, gid, ('-'+locale) if locale else '', extension))
# return the path (relative to gameserver/) to a single included gamedata source file
def gamedata_component_filename(name, override_game_id = None):
if override_game_id:
game_id = override_game_id
else:
game_id = game()
# check overlay built first
trial = os.path.join(gameserver_dir(), '../gamedata/%s/built/%s_%s' % (game_id, game_id, name))
if os.path.exists(trial): return trial
# check overlay non-built second
trial = os.path.join(gameserver_dir(), '../gamedata/%s/%s_%s' % (game_id, game_id, name))
if os.path.exists(trial): return trial
return os.path.join(gameserver_dir(), '../gamedata/%s' % name)
# return (bucket, prefix) for upcache files in S3
def upcache_s3_location(game_id):
return 'spinpunch-upcache', '%s-upcache' % game_id_long(game_id)
# return default location to look for this computer's AWS key file
def aws_key_file():
my_hostname = os.uname()[1].split('.')[0]
return os.path.join(os.getenv('HOME'), '.ssh', my_hostname+'-awssecret')
# gamedata locales to check for a specific user locale, in order of preference
def locales_to_try(locale):
if locale == 'null': return [None] # developer override
return [locale, 'en_US', None]
# misc. global tables (might want to move these to SpinUpcache later)
# COUNTRY TIERS - reflect average level of AD BID PRICES
# targeting based on http://nanigansblog.files.wordpress.com/2012/02/nanigans_facebookcountrytargeting_cpcreach3.png
# NOTE: some small countries (Bahrain, Brunei, etc) are omitted, they will be assigned to Tier 4
country_tier_map = {
'at': 1, 'dk': 1, 'fi': 1, 'gb': 1, 'nl': 1, 'no': 1, 'nz': 1, 'za': 1, 'au': 1, 'se': 1,
'ca': 2, 'us': 2,
'be': 3, 'br': 3, 'ch': 3, 'de': 3, 'es': 3, 'fr': 3, 'gr': 3, 'hk': 3, 'hu': 3, 'ie': 3, 'il': 3, 'it': 3, 'pe': 3, 'pr': 3, 'pt': 3, 'ro': 3, 'sa': 3, 'sg': 3, 'sk': 3, 've': 3,
'al': 4, 'ar': 4, 'ba': 4, 'bg': 4, 'cl': 4, 'co': 4, 'cr': 4, 'do': 4, 'dz': 4, 'eg': 4, 'ge': 4, 'gt': 4, 'hr': 4, 'id': 4, 'in': 4, 'jo': 4, 'lt': 4, 'ma': 4, 'me': 4, 'mk': 4, 'mx': 4, 'my': 4, 'ng': 4, 'pa': 4, 'ph': 4, 'pk': 4, 'pl': 4, 'rs': 4, 'sv': 4, 'th': 4, 'tn': 4, 'tr': 4, 'vn': 4,
'kr': 4, # for English-language games at least
'ae': 4, 'am': 4, 'ax': 4, 'ba': 4
}
# PRICE REGIONS - reflect average willingness to pay/price elasticity groups
price_region_map = {
'at': 'A', 'dk': 'A', 'fi': 'A', 'gb': 'A', 'nl': 'A', 'no': 'A', 'nz': 'A', 'za': 'A', 'au': 'A', 'se': 'A', 'kw': 'A', 'gg': 'A', 'im': 'A', 'qa': 'A', 'bh': 'A', 'mq': 'A',
'ca': 'B', 'us': 'B', 'is': 'B', 'ly': 'B', 'kr': 'B', 'tw': 'B',
'be': 'C', 'br': 'C', 'ch': 'C', 'de': 'C', 'es': 'C', 'fr': 'C', 'gr': 'C', 'hk': 'C', 'hu': 'C', 'ie': 'C', 'il': 'C', 'it': 'C', 'pe': 'C', 'pr': 'C', 'pt': 'C', 'ro': 'C', 'sa': 'C', 'sg': 'C', 'sk': 'C', 've': 'C',
'al': 'D', 'ar': 'D', 'ba': 'D', 'bg': 'D', 'cl': 'D', 'co': 'D', 'cr': 'D', 'do': 'D', 'dz': 'D', 'eg': 'D', 'ge': 'D', 'gt': 'D', 'hr': 'D', 'id': 'D', 'in': 'D', 'jo': 'D', 'lt': 'D', 'ma': 'D', 'me': 'D', 'mk': 'D', 'mx': 'D', 'my': 'D', 'ng': 'D', 'pa': 'D', 'ph': 'D', 'pk': 'D', 'pl': 'D', 'rs': 'D', 'sv': 'D', 'th': 'D', 'tn': 'D', 'tr': 'D', 'vn': 'D',
'ae': 'D', 'am': 'D', 'ax': 'D', 'ba': 'D', 'lb': 'D', 'np': 'D'
}
# FACAEBOOK GAME FAN PAGES - dictionary of fan page IDs for strategy games, to track user "likes"
FACEBOOK_GAME_FAN_PAGES_VERSION = 2 # increment this number each time a change is made, to avoid trusting stale data in upcache
FACEBOOK_GAME_FAN_PAGES = {
'mars_frontier':'235938246460875',
'thunder_run':'141835099310946',
'war_star_empire':'633274570056000',
'thunder_run_days_of_valor':'294870984023668',
'battlefront_mars':'1436033100000042',
'war_commander':'166402620131249',
'battle_pirates':'323061097715783',
'total_domination':'330939280268735',
'edgeworld':'329450857071583',
'vega_conflict':'349144321859865',
'light_nova':'153463478093125',
'wasteland_empires':'1 | e = os.getenv('SPIN_GAMESERVER')
if e: return e
return '../gameserver' | identifier_body |
|
SpinConfig.py | 'battlefront_heroes':'127918567418514',
'red_crucible_2':'126605594124234',
'stormfall_age_of_war':'450552231662626',
'boom_beach':'249340185214120',
'world_of_tanks':'494440040376',
'war_thunder':'362712050429431'
}
def game_launch_date(override_game_id = None):
return { 'mf': 1326794980, # 2012 Jan 17
'tr': 1368662400, # (1368662400) 2013 May 16 Turkey test release, (1369891026) 2013 May 30 -Tier 1/2 release
'mf2': 1388096233, # 2013 Dec 26 - Tier 1/2 release
'bfm': 1407024000, # (1403728087) 2014 June 26 - Tier 4 release, (1407024000) 2014 August 3 - Tier 1/2 release
'sg': 1414403421, # (1414403421) 2014 Oct 27 server set up, but not opened yet
}[override_game_id or game()]
ACCOUNT_LAPSE_TIME = 7*24*60*60 # consider an account "lapsed" if this much time has passed since last logout
# originally 3 days, changed to 7 days on 2014 Nov 2
# NEW multi-interval account lapse tracking (not all code has been updated for this yet)
ACCOUNT_LAPSE_TIMES = {
'3d': 3*24*60*60,
'7d': 7*24*60*60,
'28d': 28*24*60*60,
}
AGE_GROUPS = {'17O13': '13-17',
'24O18': '18-24',
'34O25': '25-34',
'44O35': '35-44',
'54O45': '45-54',
'64O55': '55-64'}
def years_old_to_age_group(years):
if years >= 65: return 'MISSING'
elif years >= 55: return '64O55'
elif years >= 45: return '54O45'
elif years >= 35: return '44O35'
elif years >= 25: return '34O25'
elif years >= 18: return '24O18'
elif years >= 13: return '17O13'
else: return 'MISSING'
# return UNIX time counter for first second of this year/month/day
def cal_to_unix(ymd):
year, mon, mday = ymd
return calendar.timegm(time.struct_time([year, mon, mday, 0, 0, 0, -1, -1, -1]))
def unix_to_cal(unix):
st = time.gmtime(unix)
return st.tm_year, st.tm_mon, st.tm_mday
def pretty_print_time(sec):
d = int(sec/86400)
sec -= 86400*d
h = int(sec/3600)
sec -= 3600*h
m = int(sec/60)
sec -= 60*m
ret = ''
if d > 0:
ret += '%02dd' % d
if h > 0:
ret += '%02dh' % h
ret += '%02dm%02ds' % (m, sec)
return ret
# find current PvP season/week/day based on gamedata
# "seasons" = gamedata['matchmaking']['season_starts']
# t = time you want to find the season for
def get_pvp_season(seasons, t):
for i in xrange(len(seasons)):
if seasons[i] > t:
return i
return len(seasons)
# origin = gamedata['matchmaking']['week_origin']
# t = time you want to find the week for
def get_pvp_week(origin, t):
return int((t-origin)//(7*24*60*60))
def get_pvp_day(origin, t):
return int((t-origin)//(24*60*60))
# get mongodb connection info
# returns a dictionary d where
# d['connect_args'], d['connect_kwargs'] are the things you should pass to pymongo.MongoClient() to set up the connection
# d['dbname'] is the database where your stuff is, and d['table_prefix'] should be prepended to all collection names.
def get_mongodb_config(dbname):
# figure out parent/child relationships and implicit databases
parents = {}
implicit = set()
for name, data in config['mongodb_servers'].iteritems():
if 'delegate_tables' in data:
for expr, sub_name in data['delegate_tables'].iteritems():
parents[sub_name] = data
if sub_name not in config['mongodb_servers']:
implicit.add(sub_name)
if dbname not in config.get('mongodb_servers',{}) and (dbname not in implicit):
raise Exception('config.json: no mongodb_servers entry nor implicit entry for db '+dbname)
return parse_mongodb_config(dbname, config['mongodb_servers'].get(dbname, {}), parent = parents.get(dbname, None))
def get_credentials(filename):
filename = filename.replace('$HOME', os.getenv('HOME'))
try:
fd = open(filename, 'r')
username = fd.readline().strip()
password = fd.readline().strip()
except Exception as e:
raise Exception('config.json: error reading credentials file %s: %s' % (filename, e))
return username, password
def parse_mongodb_config(dbname, cfg, parent = None):
if parent is None: parent = {}
dbname = cfg.get('dbname', dbname) # note! parent's dbname does NOT override this!
credentials = cfg.get('credentials', parent.get('credentials', None))
if credentials:
username, password = get_credentials(credentials)
else:
username = cfg.get('username', parent.get('username', None))
password = cfg.get('password', parent.get('password', None))
host = cfg.get('host', parent.get('host', None))
port = cfg.get('port', parent.get('port', 27017))
if not (host and username and (password is not None)):
raise Exception('invalid mongodb config for "%s": %s' % (dbname, repr(cfg)))
table_prefix = cfg.get('table_prefix', parent.get('table_prefix', ''))
connect_url = 'mongodb://%s:%s@%s:%s/%s' % tuple([urllib.quote(x, '') for x in [username,password,host,str(port),dbname]])
return {'connect_args':[], 'connect_kwargs':{'host':connect_url},
'host':host, 'port':port, 'username':username, 'password':password,
'dbname': dbname, 'table_prefix': table_prefix, 'delegate_tables':cfg.get('delegate_tables',parent.get('delegate_tables', {})),
'maintenance_window': cfg.get('maintenance_window',None)}
def get_mysql_config(dbname):
if dbname not in config.get('mysql_servers',{}):
raise Exception('config.json: no mysql_servers entry for db '+dbname)
return parse_mysql_config(dbname, config['mysql_servers'][dbname])
def parse_mysql_config(dbname, cfg):
dbname = cfg.get('dbname', dbname)
if 'credentials' in cfg:
username, password = get_credentials(cfg['credentials'])
else:
username = cfg['username']
password = cfg['password']
port = cfg.get('port',3306)
table_prefix = cfg.get('table_prefix', '')
return {'connect_args':(cfg['host'], username, password, dbname), 'connect_kwargs':{'use_unicode': True, 'charset': 'utf8'},
'host':cfg['host'], 'port':port, 'username':username, 'password':password,
'dbname': dbname, 'table_prefix': table_prefix, 'maintenance_window': cfg.get('maintenance_window',None)}
def get_pgsql_config(dbname):
if dbname not in config.get('pgsql_servers',{}):
raise Exception('config.json: no pgsql_servers entry for db '+dbname)
return parse_pgsql_config(dbname, config['pgsql_servers'][dbname])
def | parse_pgsql_config | identifier_name |
|
SpinConfig.py | ': 'C', 'ch': 'C', 'de': 'C', 'es': 'C', 'fr': 'C', 'gr': 'C', 'hk': 'C', 'hu': 'C', 'ie': 'C', 'il': 'C', 'it': 'C', 'pe': 'C', 'pr': 'C', 'pt': 'C', 'ro': 'C', 'sa': 'C', 'sg': 'C', 'sk': 'C', 've': 'C',
'al': 'D', 'ar': 'D', 'ba': 'D', 'bg': 'D', 'cl': 'D', 'co': 'D', 'cr': 'D', 'do': 'D', 'dz': 'D', 'eg': 'D', 'ge': 'D', 'gt': 'D', 'hr': 'D', 'id': 'D', 'in': 'D', 'jo': 'D', 'lt': 'D', 'ma': 'D', 'me': 'D', 'mk': 'D', 'mx': 'D', 'my': 'D', 'ng': 'D', 'pa': 'D', 'ph': 'D', 'pk': 'D', 'pl': 'D', 'rs': 'D', 'sv': 'D', 'th': 'D', 'tn': 'D', 'tr': 'D', 'vn': 'D',
'ae': 'D', 'am': 'D', 'ax': 'D', 'ba': 'D', 'lb': 'D', 'np': 'D'
}
# FACAEBOOK GAME FAN PAGES - dictionary of fan page IDs for strategy games, to track user "likes"
FACEBOOK_GAME_FAN_PAGES_VERSION = 2 # increment this number each time a change is made, to avoid trusting stale data in upcache
FACEBOOK_GAME_FAN_PAGES = {
'mars_frontier':'235938246460875',
'thunder_run':'141835099310946',
'war_star_empire':'633274570056000',
'thunder_run_days_of_valor':'294870984023668',
'battlefront_mars':'1436033100000042',
'war_commander':'166402620131249',
'battle_pirates':'323061097715783',
'total_domination':'330939280268735',
'edgeworld':'329450857071583',
'vega_conflict':'349144321859865',
'light_nova':'153463478093125',
'wasteland_empires':'151467404968108',
'soldiers_inc':'482177521871037',
'warzone':'172417542894731',
'contract_wars':'207598916027565',
'admiral':'321969256735',
'ninja_kingdom':'170996059738810',
'throne_rush':'221609908005798',
'backyard_monsters':'304561816235995',
'knights_clash_heroes':'180681162111398',
'under_fire':'641964019177419',
'dragons_of_atlantis':'325789367434394',
'kingdoms_of_camelot':'308882969123771',
'pirates_tides_of_fortune':'109358109188776',
'social_empires':'162772593825182',
'war_mercenaries':'105098466327305',
'clash_of_clans':'447775968580065',
'sparta_war_of_empires':'674913419214092',
'jungle_heat':'642817249078505',
'battlefront_heroes':'127918567418514',
'red_crucible_2':'126605594124234',
'stormfall_age_of_war':'450552231662626',
'boom_beach':'249340185214120',
'world_of_tanks':'494440040376',
'war_thunder':'362712050429431'
}
def game_launch_date(override_game_id = None):
return { 'mf': 1326794980, # 2012 Jan 17
'tr': 1368662400, # (1368662400) 2013 May 16 Turkey test release, (1369891026) 2013 May 30 -Tier 1/2 release
'mf2': 1388096233, # 2013 Dec 26 - Tier 1/2 release
'bfm': 1407024000, # (1403728087) 2014 June 26 - Tier 4 release, (1407024000) 2014 August 3 - Tier 1/2 release
'sg': 1414403421, # (1414403421) 2014 Oct 27 server set up, but not opened yet
}[override_game_id or game()]
ACCOUNT_LAPSE_TIME = 7*24*60*60 # consider an account "lapsed" if this much time has passed since last logout
# originally 3 days, changed to 7 days on 2014 Nov 2
# NEW multi-interval account lapse tracking (not all code has been updated for this yet)
ACCOUNT_LAPSE_TIMES = {
'3d': 3*24*60*60,
'7d': 7*24*60*60,
'28d': 28*24*60*60,
}
AGE_GROUPS = {'17O13': '13-17',
'24O18': '18-24',
'34O25': '25-34',
'44O35': '35-44',
'54O45': '45-54',
'64O55': '55-64'}
def years_old_to_age_group(years):
if years >= 65: return 'MISSING'
elif years >= 55: return '64O55'
elif years >= 45: return '54O45'
elif years >= 35: return '44O35'
elif years >= 25: return '34O25'
elif years >= 18: return '24O18'
elif years >= 13: return '17O13'
else: return 'MISSING'
# return UNIX time counter for first second of this year/month/day
def cal_to_unix(ymd):
year, mon, mday = ymd
return calendar.timegm(time.struct_time([year, mon, mday, 0, 0, 0, -1, -1, -1]))
def unix_to_cal(unix):
st = time.gmtime(unix)
return st.tm_year, st.tm_mon, st.tm_mday
def pretty_print_time(sec):
d = int(sec/86400)
sec -= 86400*d
h = int(sec/3600)
sec -= 3600*h
m = int(sec/60)
sec -= 60*m
ret = ''
if d > 0:
ret += '%02dd' % d
if h > 0:
| ret += '%02dh' % h | conditional_block |
|
chaintool.py | little')
itms=int(lsoffset/34)
for i in range(itms):
index=i*34
if(index+34==itms*34):
nextoffset=len(data)
else:
nextoffset=int.from_bytes(data[index:index+2],'little')
self.items.append([data[index+2:index+34],data[lsoffset:nextoffset]])
lsoffset=nextoffset
def get(self, name):
for i in self.items:
if i[0]==name or (name+b'\x00'*(32-len(name)))==i[0]:
return i[1]
return -1
def add(self, name, data):
for i in self.items:
if i[0]==name or (name+b'\x00'*(32-len(name)))==i[0]:
i[1]=data
return
self.items.append([name, data])
def mine(block):
i=0
info("Mining a new block...")
timer=millis()+1000
cnt=millis()
off=0
while not block.mineOnce():
if(millis()>timer):
timer=millis()+1000
info(str(int((i-off)/(millis()-cnt)))+" kH/s")
cnt=millis()
off=i
i+=1
info("Took "+str(i)+" attempts to mine block with hash "+hex(block.hash))
def createBlock(newdata):
global TopBlock
if len(newdata)>maxBlockSize:
error("Block too big!")
return
TopBlock=Block(newdata, lsblock=TopBlock)
mine(TopBlock)
blocks.append(TopBlock)
return TopBlock
def createDaughterBlock(newdata,cmt):
global TopBlock
daughter=Block(newdata, lsblock=TopBlock)
mine(daughter)
createBlock(cmt+":DAUGHTER"+str(myPublicKey.to_bytes(16, 'little'))+hex(daughter.hash))
daughter_blocks.append(daughter)
return daughter
def addData(name,data):
global current_age, current_data
if len(data)>(maxBlockSize-(3*length+28)):
blk=Block(data)
mine_remote(blk)
blk=blocks[len(blocks)-1]
daughter_blocks.append(blk)
addData(name, b'DBR:'+blk.hash.to_bytes(length,'little'))
else:
current_data.add(name,data)
pkd=current_data.pack()
if len(pkd)>=(maxBlockSize-(3*length+28)):
current_data.items.pop()
mine_remote(Block(current_data.pack()))
current_data.items=[[name,data]]
current_age=millis()
elif(current_age+120000<millis() and len(pkd)>1024):
mine_remote(Block(pkd))
current_data.items=[]
current_age=millis()
def blocktimecheck():
global current_age, current_data
while(True):
if(current_age+120000<millis() and len(current_data.pack())>1024):
mine_remote(Block(current_data.pack()))
current_data.items=[]
current_age=millis()
ls_sub_blk=None
def client_thread(cs, ip):
global solved, ls_sub_blk
index=0
while True:
data=b''
length=0
try:
tmp=cs.recv(8)
if not tmp:
break
elif tmp==b'PING':
data=b'PING'
else:
length= int.from_bytes(tmp,'little')
except:
print('['+ip+"] Error getting a response; assuming connection broken")
break
while len(data)<length:
data+=cs.recv(min(8192,length-len(data)))
reply=b'DONE'
if data==b"PING":
info("["+ip+"] Pinged.")
reply=b'OK'
elif data[:2]==b'I=':
data=data[2:]
index=int.from_bytes(data[:8],"little")
reply=b'DONE'
info("["+ip+"] I set to "+str(index))
elif data==b"GBLK":
info("["+ip+"] Block requested.")
if len(blocks)>index:
chunk=blocks[index].pack()
else:
chunk=b"NONE"
reply=chunk
reply=len(reply).to_bytes(8, 'little')+reply
elif data[:4]==b'FSB?':
hsh=int.from_bytes(data[4:],'little')
info("["+ip+"] Getting daughter block with hash "+hex(hsh))
for i in daughter_blocks:
if i.hash==hsh:
reply=i.pack()
break
if reply==b'DONE':
reply=b'NONE'
reply=len(reply).to_bytes(8, 'little')+reply
elif data[:5]==b'NODES':
info("["+ip+"] Requested node list...")
li=''
for i in nodes:
li+=i.ip+','
reply=li[:-1].encode()
reply=len(reply).to_bytes(8, 'little')+reply
elif data[:6]==b'APPEND':
off=int.from_bytes(data[6:7],'little')
g=(data[7:off],data[off:])
print("["+ip+'] adding their data:')
print("["+ip+'] > name='+str(g[0],'UTF-8'))
print("["+ip+'] > data='+str(g[1][:8],'UTF-8')+'...')
threading.Thread(target=addData, args=g).start()
elif data[:6]==b'STORE':
data=data[6:]
pos=int.from_bytes(data[:8],"little")+8
threading.Thread(target=mine_remote_daughter, args=(data[8:pos],data[pos:]))
elif data[:5]==b'IAMNODE':
info("["+ip+"] "+ip+" is a node...")
done=False
for i in nodes:
if i.ip==ip:
done=True
if (not done) and len(nodes)<=node_limit:
for i in ports:
nodes.append(Client(ip,i))
reply=b'OK'
elif data[:3]==b'SUB':
try:
info("["+ip+"] Submitting a new block to the chain...")
b=Block('')
b.unpack(data[3:])
if b.validate() and blocks[len(blocks)-1].hash==b.lshash:
blocks.append(b)
solved=True
ls_sub_blk=b
save()
elif b.validate() and blocks[0].hash==b.lshash:
daughter_blocks.append(b)
solved=True
ls_sub_blk=b
save()
else:
warn(" That was an invalid block.")
warn(" > Hash was "+hex(b.hash))
except:
error(" Error unpacking block from client.")
#print("response start: "+reply.decode())
cs.sendall(reply)
info("Client "+ip+" has disconnected.")
cs.close()
def start_new_thread(a, b):
what=threading.Thread(target=a, args=b)
what.start()
running_node=False
cs_list=[]
def server(port):
global running_node, cs_list
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind the socket to a public host, and a well-known port
try:
s.bind(('', port))
info("Node on port "+str(port)+" started.")
running_node=True
except:
error("Cannot run 2 nodes on same machine.")
return
# become a server socket
s.listen(5)
while True:
(cs, adr) = s.accept()
info("CONNECTED: "+adr[0]+':'+str(adr[1]))
start_new_thread(client_thread ,(cs,adr[0]))
cs_list.append(cs)
if not solved:
info("Sending new client mining job...")
cs.sendall(mining_reply)
class Client():
def __init__(self, ip, port):
self.ip=ip
info("Connecting to "+str(ip)+':'+str(port)+"...")
self.conn=socket.socket()
self.conn.connect((ip, port))
self.conn.sendall(b"PING") | self.inUse=True
else:
info(" Node discovered: "+ip+':'+str(port))
self.inUse=False
if running_node:
self.inUse=True # stop polling processes
self.avsend(b'IAMNODE')
self.conn.recv(2)
self.inUse=False # resume polling processes
threading.Thread(target=node_mining_thread,args=(self,))
def avsend(self, data):
self.conn.sendall(len(data).to_bytes(8, 'little'))
self.conn.sendall(data)
def avrec(self):
data=b''
length=0
tmp=self.conn.recv(8)
length=int.from_bytes(tmp,'little')
while len(data)<length:
data+=self.conn.recv(min(8192,length-len(data)))
return data
def getDaughterBlock(self, h_ash):
try:
self.inUse=True # stop polling processes
self.avsend(b'FSB?'+h_ash.to_bytes(length,'little'))
| if self.conn.recv(4).decode()!='OK':
error(str(ip)+" is not a node.")
self.conn.close() | random_line_split |
chaintool.py | little')
itms=int(lsoffset/34)
for i in range(itms):
index=i*34
if(index+34==itms*34):
nextoffset=len(data)
else:
nextoffset=int.from_bytes(data[index:index+2],'little')
self.items.append([data[index+2:index+34],data[lsoffset:nextoffset]])
lsoffset=nextoffset
def get(self, name):
for i in self.items:
if i[0]==name or (name+b'\x00'*(32-len(name)))==i[0]:
return i[1]
return -1
def add(self, name, data):
for i in self.items:
if i[0]==name or (name+b'\x00'*(32-len(name)))==i[0]:
i[1]=data
return
self.items.append([name, data])
def mine(block):
i=0
info("Mining a new block...")
timer=millis()+1000
cnt=millis()
off=0
while not block.mineOnce():
if(millis()>timer):
timer=millis()+1000
info(str(int((i-off)/(millis()-cnt)))+" kH/s")
cnt=millis()
off=i
i+=1
info("Took "+str(i)+" attempts to mine block with hash "+hex(block.hash))
def createBlock(newdata):
global TopBlock
if len(newdata)>maxBlockSize:
error("Block too big!")
return
TopBlock=Block(newdata, lsblock=TopBlock)
mine(TopBlock)
blocks.append(TopBlock)
return TopBlock
def createDaughterBlock(newdata,cmt):
global TopBlock
daughter=Block(newdata, lsblock=TopBlock)
mine(daughter)
createBlock(cmt+":DAUGHTER"+str(myPublicKey.to_bytes(16, 'little'))+hex(daughter.hash))
daughter_blocks.append(daughter)
return daughter
def addData(name,data):
global current_age, current_data
if len(data)>(maxBlockSize-(3*length+28)):
blk=Block(data)
mine_remote(blk)
blk=blocks[len(blocks)-1]
daughter_blocks.append(blk)
addData(name, b'DBR:'+blk.hash.to_bytes(length,'little'))
else:
current_data.add(name,data)
pkd=current_data.pack()
if len(pkd)>=(maxBlockSize-(3*length+28)):
current_data.items.pop()
mine_remote(Block(current_data.pack()))
current_data.items=[[name,data]]
current_age=millis()
elif(current_age+120000<millis() and len(pkd)>1024):
mine_remote(Block(pkd))
current_data.items=[]
current_age=millis()
def blocktimecheck():
global current_age, current_data
while(True):
if(current_age+120000<millis() and len(current_data.pack())>1024):
mine_remote(Block(current_data.pack()))
current_data.items=[]
current_age=millis()
ls_sub_blk=None
def | (cs, ip):
global solved, ls_sub_blk
index=0
while True:
data=b''
length=0
try:
tmp=cs.recv(8)
if not tmp:
break
elif tmp==b'PING':
data=b'PING'
else:
length= int.from_bytes(tmp,'little')
except:
print('['+ip+"] Error getting a response; assuming connection broken")
break
while len(data)<length:
data+=cs.recv(min(8192,length-len(data)))
reply=b'DONE'
if data==b"PING":
info("["+ip+"] Pinged.")
reply=b'OK'
elif data[:2]==b'I=':
data=data[2:]
index=int.from_bytes(data[:8],"little")
reply=b'DONE'
info("["+ip+"] I set to "+str(index))
elif data==b"GBLK":
info("["+ip+"] Block requested.")
if len(blocks)>index:
chunk=blocks[index].pack()
else:
chunk=b"NONE"
reply=chunk
reply=len(reply).to_bytes(8, 'little')+reply
elif data[:4]==b'FSB?':
hsh=int.from_bytes(data[4:],'little')
info("["+ip+"] Getting daughter block with hash "+hex(hsh))
for i in daughter_blocks:
if i.hash==hsh:
reply=i.pack()
break
if reply==b'DONE':
reply=b'NONE'
reply=len(reply).to_bytes(8, 'little')+reply
elif data[:5]==b'NODES':
info("["+ip+"] Requested node list...")
li=''
for i in nodes:
li+=i.ip+','
reply=li[:-1].encode()
reply=len(reply).to_bytes(8, 'little')+reply
elif data[:6]==b'APPEND':
off=int.from_bytes(data[6:7],'little')
g=(data[7:off],data[off:])
print("["+ip+'] adding their data:')
print("["+ip+'] > name='+str(g[0],'UTF-8'))
print("["+ip+'] > data='+str(g[1][:8],'UTF-8')+'...')
threading.Thread(target=addData, args=g).start()
elif data[:6]==b'STORE':
data=data[6:]
pos=int.from_bytes(data[:8],"little")+8
threading.Thread(target=mine_remote_daughter, args=(data[8:pos],data[pos:]))
elif data[:5]==b'IAMNODE':
info("["+ip+"] "+ip+" is a node...")
done=False
for i in nodes:
if i.ip==ip:
done=True
if (not done) and len(nodes)<=node_limit:
for i in ports:
nodes.append(Client(ip,i))
reply=b'OK'
elif data[:3]==b'SUB':
try:
info("["+ip+"] Submitting a new block to the chain...")
b=Block('')
b.unpack(data[3:])
if b.validate() and blocks[len(blocks)-1].hash==b.lshash:
blocks.append(b)
solved=True
ls_sub_blk=b
save()
elif b.validate() and blocks[0].hash==b.lshash:
daughter_blocks.append(b)
solved=True
ls_sub_blk=b
save()
else:
warn(" That was an invalid block.")
warn(" > Hash was "+hex(b.hash))
except:
error(" Error unpacking block from client.")
#print("response start: "+reply.decode())
cs.sendall(reply)
info("Client "+ip+" has disconnected.")
cs.close()
def start_new_thread(a, b):
what=threading.Thread(target=a, args=b)
what.start()
running_node=False
cs_list=[]
def server(port):
global running_node, cs_list
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind the socket to a public host, and a well-known port
try:
s.bind(('', port))
info("Node on port "+str(port)+" started.")
running_node=True
except:
error("Cannot run 2 nodes on same machine.")
return
# become a server socket
s.listen(5)
while True:
(cs, adr) = s.accept()
info("CONNECTED: "+adr[0]+':'+str(adr[1]))
start_new_thread(client_thread ,(cs,adr[0]))
cs_list.append(cs)
if not solved:
info("Sending new client mining job...")
cs.sendall(mining_reply)
class Client():
def __init__(self, ip, port):
self.ip=ip
info("Connecting to "+str(ip)+':'+str(port)+"...")
self.conn=socket.socket()
self.conn.connect((ip, port))
self.conn.sendall(b"PING")
if self.conn.recv(4).decode()!='OK':
error(str(ip)+" is not a node.")
self.conn.close()
self.inUse=True
else:
info(" Node discovered: "+ip+':'+str(port))
self.inUse=False
if running_node:
self.inUse=True # stop polling processes
self.avsend(b'IAMNODE')
self.conn.recv(2)
self.inUse=False # resume polling processes
threading.Thread(target=node_mining_thread,args=(self,))
def avsend(self, data):
self.conn.sendall(len(data).to_bytes(8, 'little'))
self.conn.sendall(data)
def avrec(self):
data=b''
length=0
tmp=self.conn.recv(8)
length=int.from_bytes(tmp,'little')
while len(data)<length:
data+=self.conn.recv(min(8192,length-len(data)))
return data
def getDaughterBlock(self, h_ash):
try:
self.inUse=True # stop polling processes
self.avsend(b'FSB?'+h_ash.to_bytes(length,'little'))
| client_thread | identifier_name |
chaintool.py | little')
itms=int(lsoffset/34)
for i in range(itms):
index=i*34
if(index+34==itms*34):
nextoffset=len(data)
else:
nextoffset=int.from_bytes(data[index:index+2],'little')
self.items.append([data[index+2:index+34],data[lsoffset:nextoffset]])
lsoffset=nextoffset
def get(self, name):
for i in self.items:
if i[0]==name or (name+b'\x00'*(32-len(name)))==i[0]:
return i[1]
return -1
def add(self, name, data):
for i in self.items:
if i[0]==name or (name+b'\x00'*(32-len(name)))==i[0]:
i[1]=data
return
self.items.append([name, data])
def mine(block):
i=0
info("Mining a new block...")
timer=millis()+1000
cnt=millis()
off=0
while not block.mineOnce():
if(millis()>timer):
timer=millis()+1000
info(str(int((i-off)/(millis()-cnt)))+" kH/s")
cnt=millis()
off=i
i+=1
info("Took "+str(i)+" attempts to mine block with hash "+hex(block.hash))
def createBlock(newdata):
global TopBlock
if len(newdata)>maxBlockSize:
error("Block too big!")
return
TopBlock=Block(newdata, lsblock=TopBlock)
mine(TopBlock)
blocks.append(TopBlock)
return TopBlock
def createDaughterBlock(newdata,cmt):
global TopBlock
daughter=Block(newdata, lsblock=TopBlock)
mine(daughter)
createBlock(cmt+":DAUGHTER"+str(myPublicKey.to_bytes(16, 'little'))+hex(daughter.hash))
daughter_blocks.append(daughter)
return daughter
def addData(name,data):
global current_age, current_data
if len(data)>(maxBlockSize-(3*length+28)):
blk=Block(data)
mine_remote(blk)
blk=blocks[len(blocks)-1]
daughter_blocks.append(blk)
addData(name, b'DBR:'+blk.hash.to_bytes(length,'little'))
else:
current_data.add(name,data)
pkd=current_data.pack()
if len(pkd)>=(maxBlockSize-(3*length+28)):
current_data.items.pop()
mine_remote(Block(current_data.pack()))
current_data.items=[[name,data]]
current_age=millis()
elif(current_age+120000<millis() and len(pkd)>1024):
mine_remote(Block(pkd))
current_data.items=[]
current_age=millis()
def blocktimecheck():
global current_age, current_data
while(True):
if(current_age+120000<millis() and len(current_data.pack())>1024):
mine_remote(Block(current_data.pack()))
current_data.items=[]
current_age=millis()
ls_sub_blk=None
def client_thread(cs, ip):
global solved, ls_sub_blk
index=0
while True:
data=b''
length=0
try:
tmp=cs.recv(8)
if not tmp:
break
elif tmp==b'PING':
data=b'PING'
else:
length= int.from_bytes(tmp,'little')
except:
print('['+ip+"] Error getting a response; assuming connection broken")
break
while len(data)<length:
data+=cs.recv(min(8192,length-len(data)))
reply=b'DONE'
if data==b"PING":
info("["+ip+"] Pinged.")
reply=b'OK'
elif data[:2]==b'I=':
data=data[2:]
index=int.from_bytes(data[:8],"little")
reply=b'DONE'
info("["+ip+"] I set to "+str(index))
elif data==b"GBLK":
info("["+ip+"] Block requested.")
if len(blocks)>index:
chunk=blocks[index].pack()
else:
chunk=b"NONE"
reply=chunk
reply=len(reply).to_bytes(8, 'little')+reply
elif data[:4]==b'FSB?':
hsh=int.from_bytes(data[4:],'little')
info("["+ip+"] Getting daughter block with hash "+hex(hsh))
for i in daughter_blocks:
if i.hash==hsh:
reply=i.pack()
break
if reply==b'DONE':
reply=b'NONE'
reply=len(reply).to_bytes(8, 'little')+reply
elif data[:5]==b'NODES':
info("["+ip+"] Requested node list...")
li=''
for i in nodes:
li+=i.ip+','
reply=li[:-1].encode()
reply=len(reply).to_bytes(8, 'little')+reply
elif data[:6]==b'APPEND':
off=int.from_bytes(data[6:7],'little')
g=(data[7:off],data[off:])
print("["+ip+'] adding their data:')
print("["+ip+'] > name='+str(g[0],'UTF-8'))
print("["+ip+'] > data='+str(g[1][:8],'UTF-8')+'...')
threading.Thread(target=addData, args=g).start()
elif data[:6]==b'STORE':
data=data[6:]
pos=int.from_bytes(data[:8],"little")+8
threading.Thread(target=mine_remote_daughter, args=(data[8:pos],data[pos:]))
elif data[:5]==b'IAMNODE':
info("["+ip+"] "+ip+" is a node...")
done=False
for i in nodes:
if i.ip==ip:
done=True
if (not done) and len(nodes)<=node_limit:
for i in ports:
nodes.append(Client(ip,i))
reply=b'OK'
elif data[:3]==b'SUB':
try:
info("["+ip+"] Submitting a new block to the chain...")
b=Block('')
b.unpack(data[3:])
if b.validate() and blocks[len(blocks)-1].hash==b.lshash:
blocks.append(b)
solved=True
ls_sub_blk=b
save()
elif b.validate() and blocks[0].hash==b.lshash:
daughter_blocks.append(b)
solved=True
ls_sub_blk=b
save()
else:
warn(" That was an invalid block.")
warn(" > Hash was "+hex(b.hash))
except:
error(" Error unpacking block from client.")
#print("response start: "+reply.decode())
cs.sendall(reply)
info("Client "+ip+" has disconnected.")
cs.close()
def start_new_thread(a, b):
what=threading.Thread(target=a, args=b)
what.start()
running_node=False
cs_list=[]
def server(port):
|
class Client():
def __init__(self, ip, port):
self.ip=ip
info("Connecting to "+str(ip)+':'+str(port)+"...")
self.conn=socket.socket()
self.conn.connect((ip, port))
self.conn.sendall(b"PING")
if self.conn.recv(4).decode()!='OK':
error(str(ip)+" is not a node.")
self.conn.close()
self.inUse=True
else:
info(" Node discovered: "+ip+':'+str(port))
self.inUse=False
if running_node:
self.inUse=True # stop polling processes
self.avsend(b'IAMNODE')
self.conn.recv(2)
self.inUse=False # resume polling processes
threading.Thread(target=node_mining_thread,args=(self,))
def avsend(self, data):
self.conn.sendall(len(data).to_bytes(8, 'little'))
self.conn.sendall(data)
def avrec(self):
data=b''
length=0
tmp=self.conn.recv(8)
length=int.from_bytes(tmp,'little')
while len(data)<length:
data+=self.conn.recv(min(8192,length-len(data)))
return data
def getDaughterBlock(self, h_ash):
try:
self.inUse=True # stop polling processes
self.avsend(b'FSB?'+h_ash.to_bytes(length,'little'))
| global running_node, cs_list
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind the socket to a public host, and a well-known port
try:
s.bind(('', port))
info("Node on port "+str(port)+" started.")
running_node=True
except:
error("Cannot run 2 nodes on same machine.")
return
# become a server socket
s.listen(5)
while True:
(cs, adr) = s.accept()
info("CONNECTED: "+adr[0]+':'+str(adr[1]))
start_new_thread(client_thread ,(cs,adr[0]))
cs_list.append(cs)
if not solved:
info("Sending new client mining job...")
cs.sendall(mining_reply) | identifier_body |
chaintool.py | little')
itms=int(lsoffset/34)
for i in range(itms):
index=i*34
if(index+34==itms*34):
nextoffset=len(data)
else:
nextoffset=int.from_bytes(data[index:index+2],'little')
self.items.append([data[index+2:index+34],data[lsoffset:nextoffset]])
lsoffset=nextoffset
def get(self, name):
for i in self.items:
if i[0]==name or (name+b'\x00'*(32-len(name)))==i[0]:
return i[1]
return -1
def add(self, name, data):
for i in self.items:
if i[0]==name or (name+b'\x00'*(32-len(name)))==i[0]:
i[1]=data
return
self.items.append([name, data])
def mine(block):
i=0
info("Mining a new block...")
timer=millis()+1000
cnt=millis()
off=0
while not block.mineOnce():
if(millis()>timer):
timer=millis()+1000
info(str(int((i-off)/(millis()-cnt)))+" kH/s")
cnt=millis()
off=i
i+=1
info("Took "+str(i)+" attempts to mine block with hash "+hex(block.hash))
def createBlock(newdata):
global TopBlock
if len(newdata)>maxBlockSize:
error("Block too big!")
return
TopBlock=Block(newdata, lsblock=TopBlock)
mine(TopBlock)
blocks.append(TopBlock)
return TopBlock
def createDaughterBlock(newdata,cmt):
global TopBlock
daughter=Block(newdata, lsblock=TopBlock)
mine(daughter)
createBlock(cmt+":DAUGHTER"+str(myPublicKey.to_bytes(16, 'little'))+hex(daughter.hash))
daughter_blocks.append(daughter)
return daughter
def addData(name,data):
global current_age, current_data
if len(data)>(maxBlockSize-(3*length+28)):
blk=Block(data)
mine_remote(blk)
blk=blocks[len(blocks)-1]
daughter_blocks.append(blk)
addData(name, b'DBR:'+blk.hash.to_bytes(length,'little'))
else:
current_data.add(name,data)
pkd=current_data.pack()
if len(pkd)>=(maxBlockSize-(3*length+28)):
current_data.items.pop()
mine_remote(Block(current_data.pack()))
current_data.items=[[name,data]]
current_age=millis()
elif(current_age+120000<millis() and len(pkd)>1024):
mine_remote(Block(pkd))
current_data.items=[]
current_age=millis()
def blocktimecheck():
global current_age, current_data
while(True):
if(current_age+120000<millis() and len(current_data.pack())>1024):
mine_remote(Block(current_data.pack()))
current_data.items=[]
current_age=millis()
ls_sub_blk=None
def client_thread(cs, ip):
global solved, ls_sub_blk
index=0
while True:
data=b''
length=0
try:
tmp=cs.recv(8)
if not tmp:
break
elif tmp==b'PING':
data=b'PING'
else:
length= int.from_bytes(tmp,'little')
except:
print('['+ip+"] Error getting a response; assuming connection broken")
break
while len(data)<length:
data+=cs.recv(min(8192,length-len(data)))
reply=b'DONE'
if data==b"PING":
info("["+ip+"] Pinged.")
reply=b'OK'
elif data[:2]==b'I=':
|
elif data==b"GBLK":
info("["+ip+"] Block requested.")
if len(blocks)>index:
chunk=blocks[index].pack()
else:
chunk=b"NONE"
reply=chunk
reply=len(reply).to_bytes(8, 'little')+reply
elif data[:4]==b'FSB?':
hsh=int.from_bytes(data[4:],'little')
info("["+ip+"] Getting daughter block with hash "+hex(hsh))
for i in daughter_blocks:
if i.hash==hsh:
reply=i.pack()
break
if reply==b'DONE':
reply=b'NONE'
reply=len(reply).to_bytes(8, 'little')+reply
elif data[:5]==b'NODES':
info("["+ip+"] Requested node list...")
li=''
for i in nodes:
li+=i.ip+','
reply=li[:-1].encode()
reply=len(reply).to_bytes(8, 'little')+reply
elif data[:6]==b'APPEND':
off=int.from_bytes(data[6:7],'little')
g=(data[7:off],data[off:])
print("["+ip+'] adding their data:')
print("["+ip+'] > name='+str(g[0],'UTF-8'))
print("["+ip+'] > data='+str(g[1][:8],'UTF-8')+'...')
threading.Thread(target=addData, args=g).start()
elif data[:6]==b'STORE':
data=data[6:]
pos=int.from_bytes(data[:8],"little")+8
threading.Thread(target=mine_remote_daughter, args=(data[8:pos],data[pos:]))
elif data[:5]==b'IAMNODE':
info("["+ip+"] "+ip+" is a node...")
done=False
for i in nodes:
if i.ip==ip:
done=True
if (not done) and len(nodes)<=node_limit:
for i in ports:
nodes.append(Client(ip,i))
reply=b'OK'
elif data[:3]==b'SUB':
try:
info("["+ip+"] Submitting a new block to the chain...")
b=Block('')
b.unpack(data[3:])
if b.validate() and blocks[len(blocks)-1].hash==b.lshash:
blocks.append(b)
solved=True
ls_sub_blk=b
save()
elif b.validate() and blocks[0].hash==b.lshash:
daughter_blocks.append(b)
solved=True
ls_sub_blk=b
save()
else:
warn(" That was an invalid block.")
warn(" > Hash was "+hex(b.hash))
except:
error(" Error unpacking block from client.")
#print("response start: "+reply.decode())
cs.sendall(reply)
info("Client "+ip+" has disconnected.")
cs.close()
def start_new_thread(a, b):
what=threading.Thread(target=a, args=b)
what.start()
running_node=False
cs_list=[]
def server(port):
global running_node, cs_list
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind the socket to a public host, and a well-known port
try:
s.bind(('', port))
info("Node on port "+str(port)+" started.")
running_node=True
except:
error("Cannot run 2 nodes on same machine.")
return
# become a server socket
s.listen(5)
while True:
(cs, adr) = s.accept()
info("CONNECTED: "+adr[0]+':'+str(adr[1]))
start_new_thread(client_thread ,(cs,adr[0]))
cs_list.append(cs)
if not solved:
info("Sending new client mining job...")
cs.sendall(mining_reply)
class Client():
def __init__(self, ip, port):
self.ip=ip
info("Connecting to "+str(ip)+':'+str(port)+"...")
self.conn=socket.socket()
self.conn.connect((ip, port))
self.conn.sendall(b"PING")
if self.conn.recv(4).decode()!='OK':
error(str(ip)+" is not a node.")
self.conn.close()
self.inUse=True
else:
info(" Node discovered: "+ip+':'+str(port))
self.inUse=False
if running_node:
self.inUse=True # stop polling processes
self.avsend(b'IAMNODE')
self.conn.recv(2)
self.inUse=False # resume polling processes
threading.Thread(target=node_mining_thread,args=(self,))
def avsend(self, data):
self.conn.sendall(len(data).to_bytes(8, 'little'))
self.conn.sendall(data)
def avrec(self):
data=b''
length=0
tmp=self.conn.recv(8)
length=int.from_bytes(tmp,'little')
while len(data)<length:
data+=self.conn.recv(min(8192,length-len(data)))
return data
def getDaughterBlock(self, h_ash):
try:
self.inUse=True # stop polling processes
self.avsend(b'FSB?'+h_ash.to_bytes(length,'little'))
| data=data[2:]
index=int.from_bytes(data[:8],"little")
reply=b'DONE'
info("["+ip+"] I set to "+str(index)) | conditional_block |
parsers.py | pathname2url(relate(obj.file_path, self.base_path))
self._store.append(obj)
return obj
except Exception as e:
LOGGER.error(e)
LOGGER.error('Exception occurred while creating an object for %s' % url)
return None # return unmodified
def default_handler(self, elem, attr, url, pos):
"""Handles any link type <a> <link> <script> <style> <style url>.
Note: Default handler function structures makes use of .rel_path attribute
which is completely internal and any usage depending on this attribute
may not work properly.
"""
obj = self._base_handler(elem, attr, url, pos)
if obj is None:
return
if attr is None:
new = elem.text[:pos] + obj.rel_path + elem.text[len(url) + pos:]
elem.text = new
else:
cur = elem.get(attr)
if not pos and len(cur) == len(url):
new = obj.rel_path # most common case
else:
new = cur[:pos] + obj.rel_path + cur[pos + len(url):]
elem.set(attr, new)
LOGGER.info("Remapped url of the file: %s to the path: %s " % (url, obj.rel_path))
self._urlMap[url] = obj.rel_path
return obj
def handle(self, elem, attr, url, pos):
"""Base handler function."""
if url.startswith(u'#') or url.startswith(u'java') or \
url.startswith(u'data') or not url.strip('/') or not url.strip():
return url
if not self.base_url:
raise AttributeError("Url attributes are unset!")
_handler = self.default_handler(elem, attr, url, pos)
if not _handler:
LOGGER.debug("No handler found for the link of type %s !" % elem.tag)
return url # return unmodified
else:
return _handler
class WebPage(BaseParser, object):
"""Provides scraping and parsing and saving ability in one class."""
def __init__(self, url, project_folder=None, project_name=None, encoding=None,
force_decoding=False, HTML=None, url_handler=None, **kwargs):
self.original_url = url
self._url = url if HTML else None
self._request = None
config.setup_config(url, project_folder, project_name, **kwargs)
if not HTML and (not self.request or not self.request.ok):
raise InvalidUrlError("Provided url didn't work %s" % url)
self._url_obj = None
self._url_handler = url_handler
super(WebPage, self).__init__(
element=HTML,
url=self.url,
default_encoding=encoding or 'utf-8' if HTML else self.request.encoding,
HTML=HTML or self.request.content,
)
self.force_decoding = force_decoding
if not self.force_decoding:
self._useDefaultDecoder = True
@property
def url(self):
"""Returns a url as reported by the server."""
if self._url is None:
self._url = self.request.url
return self._url
@url.setter
def url(self, new_url):
self._url = new_url
@property
def url_obj(self):
"""Returns an Url() object made from the self.url string.
:returns: Url() object"""
if self._url_obj is None:
self._url_obj = Url(self.url)
self._url_obj.base_path = config['project_folder']
self._url_obj.default_filename = 'index.html'
self._url_obj._unique_fn_required = False
return self._url_obj
@property
def url_handler(self):
if self._url_handler is None:
self._url_handler = UrlHandler(self.url, self.url_obj.file_path)
return self._url_handler
@property
def request(self):
"""Makes a http request to the server and sets the .http_request attribute to
returned response."""
if not self._request:
self._request = SESSION.get(self.original_url, stream=True)
if self._request is None:
raise InvalidUrlError("Webpage couldn't be loaded from url %s" % self.original_url)
return self._request
@request.setter
def request(self, requestsResponseObject):
"""Sets the base"""
@property
def htmlStream(self):
"""Returns a stream of data fetched from the objects url attribute."""
return io.BytesIO(self.html)
def linkedElements(self, tags=None):
"""Returns every linked file object as :class: `lxml.html.Element` (multiple times)."""
for elem in self.url_handler.elements:
if not tags:
yield elem
else:
if elem.tag in tags:
yield elem
def _extractLinks(self):
"""Rewrites url in document root."""
# `lxml.html` object has a `.iterlinks` function which is crucial for this
# task to be completed.
if self.lxml is None:
raise RuntimeError("Couldn't generate a etree object for the url %s" % self.url)
# stores the etree.html object generated by the lxml in the attribute
for i in self.lxml.iterlinks():
self.url_handler.handle(*i)
def _remapImages(self):
"""Rewrites <img> attributes if it have an srcset type attribute which prevents rendering of img
from its original src attribute url."""
if self.lxml is None:
raise RuntimeError("Couldn't rewrite images for the url %s" % self.url)
set_url = re.compile(r'((?:https?:\/|)[\w\/\.\\_-]+)')
for elem in self.lxml.xpath('.//img[@*]'):
_keys = elem.attrib.keys()
LOGGER.debug("Pre-Attributes for the imgs")
LOGGER.debug(elem.attrib)
if 'src' in _keys: # element would be catched later while saving files
elem.attrib.update({'data-src': '', 'data-srcset': '', 'srcset':''})
elif 'data-src' in _keys:
elem.attrib.update({'data-src': '', 'data-srcset': '', 'srcset': '', 'src': elem.attrib['data-src']})
elif 'data-srcset' in _keys:
_first_url = set_url.findall(elem.attrib.get('data-srcset'))[0]
elem.attrib.update({'data-srcset': '', 'data-src': '', 'srcset': '', 'src': _first_url})
elif 'srcset' in _keys:
_first_url = set_url.findall(elem.attrib.get('srcset'))[0]
elem.attrib.update({'data-srcset': '', 'data-src': '', 'srcset': '', 'src': _first_url})
else:
pass # unknown case
LOGGER.debug("Remapped Attributes of the img.")
LOGGER.debug(elem.attrib)
def get(self, url, use_global_session=True, **requestskwargs):
"""Fetches the Html content from Internet.
:param url: url of the webpage to fetch
:param use_global_session: if you would like later http requests made to server to follow the
same configuration as you provided then leave it to 'True' else if you want
only single http request to follow these configuration set it to 'False'.
:param **requestskwargs: keyword arguments which `requests` module may accept.
"""
if use_global_session:
self.html = SESSION.get(url, **requestskwargs).content
else:
self.html = requests.get(url, **requestskwargs).content
def save_html(self, file_name=None, raw_html=True):
"""Saves the html of the page to a default or specified file.
:param file_name: path of the file to write the contents to
:param raw_html: whether write the unmodified html or the rewritten html
"""
if raw_html:
with open(file_name or self.url_obj.file_path, 'wb') as fh:
fh.write(self.raw_html)
else:
self.lxml.getroottree().write(file_name or self.url_obj.file_path, method="html")
def save_assets(self, base_path=None, reset_html=True):
"""Save only the linked files to the disk.
:param base_path: folder in which to store the files.
:param reset_html: whether to write modified file locations to the html content
of this object
"""
if base_path and not os.path.isdir(base_path):
raise ValueError("Provided path is not a valid directory! %" % base_path)
self._remapImages()
self._extractLinks()
for elem in self.url_handler.elements:
try:
if base_path:
elem.base_path = base_path
elem.save_file()
except Exception as e:
LOGGER.error("Linked file generated an error upon saving!")
LOGGER.error(e)
pass
if reset_html:
self._lxml = None # reset the ElementTree
def save_complete(self):
"""Saves the complete html page to a file and also writes its linked files to the disk."""
self.save_assets(reset_html=False)
# new_file(self.url_obj.file_path, content=tostring(self.lxml, encoding=self.encoding)) | self.lxml.getroottree().write(self.url_obj.file_path, method="html")
self._lxml = None # reset the tree | random_line_split |
|
parsers.py | obj = self._base_handler(elem, attr, url, pos)
if obj is None:
return
if attr is None:
new = elem.text[:pos] + obj.rel_path + elem.text[len(url) + pos:]
elem.text = new
else:
cur = elem.get(attr)
if not pos and len(cur) == len(url):
new = obj.rel_path # most common case
else:
new = cur[:pos] + obj.rel_path + cur[pos + len(url):]
elem.set(attr, new)
LOGGER.info("Remapped url of the file: %s to the path: %s " % (url, obj.rel_path))
self._urlMap[url] = obj.rel_path
return obj
def handle(self, elem, attr, url, pos):
"""Base handler function."""
if url.startswith(u'#') or url.startswith(u'java') or \
url.startswith(u'data') or not url.strip('/') or not url.strip():
return url
if not self.base_url:
raise AttributeError("Url attributes are unset!")
_handler = self.default_handler(elem, attr, url, pos)
if not _handler:
LOGGER.debug("No handler found for the link of type %s !" % elem.tag)
return url # return unmodified
else:
return _handler
class WebPage(BaseParser, object):
"""Provides scraping and parsing and saving ability in one class."""
def __init__(self, url, project_folder=None, project_name=None, encoding=None,
force_decoding=False, HTML=None, url_handler=None, **kwargs):
self.original_url = url
self._url = url if HTML else None
self._request = None
config.setup_config(url, project_folder, project_name, **kwargs)
if not HTML and (not self.request or not self.request.ok):
raise InvalidUrlError("Provided url didn't work %s" % url)
self._url_obj = None
self._url_handler = url_handler
super(WebPage, self).__init__(
element=HTML,
url=self.url,
default_encoding=encoding or 'utf-8' if HTML else self.request.encoding,
HTML=HTML or self.request.content,
)
self.force_decoding = force_decoding
if not self.force_decoding:
self._useDefaultDecoder = True
@property
def url(self):
"""Returns a url as reported by the server."""
if self._url is None:
self._url = self.request.url
return self._url
@url.setter
def url(self, new_url):
self._url = new_url
@property
def url_obj(self):
"""Returns an Url() object made from the self.url string.
:returns: Url() object"""
if self._url_obj is None:
self._url_obj = Url(self.url)
self._url_obj.base_path = config['project_folder']
self._url_obj.default_filename = 'index.html'
self._url_obj._unique_fn_required = False
return self._url_obj
@property
def url_handler(self):
if self._url_handler is None:
self._url_handler = UrlHandler(self.url, self.url_obj.file_path)
return self._url_handler
@property
def request(self):
"""Makes a http request to the server and sets the .http_request attribute to
returned response."""
if not self._request:
self._request = SESSION.get(self.original_url, stream=True)
if self._request is None:
raise InvalidUrlError("Webpage couldn't be loaded from url %s" % self.original_url)
return self._request
@request.setter
def request(self, requestsResponseObject):
"""Sets the base"""
@property
def htmlStream(self):
"""Returns a stream of data fetched from the objects url attribute."""
return io.BytesIO(self.html)
def linkedElements(self, tags=None):
"""Returns every linked file object as :class: `lxml.html.Element` (multiple times)."""
for elem in self.url_handler.elements:
if not tags:
yield elem
else:
if elem.tag in tags:
yield elem
def _extractLinks(self):
"""Rewrites url in document root."""
# `lxml.html` object has a `.iterlinks` function which is crucial for this
# task to be completed.
if self.lxml is None:
raise RuntimeError("Couldn't generate a etree object for the url %s" % self.url)
# stores the etree.html object generated by the lxml in the attribute
for i in self.lxml.iterlinks():
self.url_handler.handle(*i)
def _remapImages(self):
"""Rewrites <img> attributes if it have an srcset type attribute which prevents rendering of img
from its original src attribute url."""
if self.lxml is None:
raise RuntimeError("Couldn't rewrite images for the url %s" % self.url)
set_url = re.compile(r'((?:https?:\/|)[\w\/\.\\_-]+)')
for elem in self.lxml.xpath('.//img[@*]'):
_keys = elem.attrib.keys()
LOGGER.debug("Pre-Attributes for the imgs")
LOGGER.debug(elem.attrib)
if 'src' in _keys: # element would be catched later while saving files
elem.attrib.update({'data-src': '', 'data-srcset': '', 'srcset':''})
elif 'data-src' in _keys:
elem.attrib.update({'data-src': '', 'data-srcset': '', 'srcset': '', 'src': elem.attrib['data-src']})
elif 'data-srcset' in _keys:
_first_url = set_url.findall(elem.attrib.get('data-srcset'))[0]
elem.attrib.update({'data-srcset': '', 'data-src': '', 'srcset': '', 'src': _first_url})
elif 'srcset' in _keys:
_first_url = set_url.findall(elem.attrib.get('srcset'))[0]
elem.attrib.update({'data-srcset': '', 'data-src': '', 'srcset': '', 'src': _first_url})
else:
pass # unknown case
LOGGER.debug("Remapped Attributes of the img.")
LOGGER.debug(elem.attrib)
def get(self, url, use_global_session=True, **requestskwargs):
"""Fetches the Html content from Internet.
:param url: url of the webpage to fetch
:param use_global_session: if you would like later http requests made to server to follow the
same configuration as you provided then leave it to 'True' else if you want
only single http request to follow these configuration set it to 'False'.
:param **requestskwargs: keyword arguments which `requests` module may accept.
"""
if use_global_session:
self.html = SESSION.get(url, **requestskwargs).content
else:
self.html = requests.get(url, **requestskwargs).content
def save_html(self, file_name=None, raw_html=True):
"""Saves the html of the page to a default or specified file.
:param file_name: path of the file to write the contents to
:param raw_html: whether write the unmodified html or the rewritten html
"""
if raw_html:
with open(file_name or self.url_obj.file_path, 'wb') as fh:
fh.write(self.raw_html)
else:
self.lxml.getroottree().write(file_name or self.url_obj.file_path, method="html")
def save_assets(self, base_path=None, reset_html=True):
"""Save only the linked files to the disk.
:param base_path: folder in which to store the files.
:param reset_html: whether to write modified file locations to the html content
of this object
"""
if base_path and not os.path.isdir(base_path):
raise ValueError("Provided path is not a valid directory! %" % base_path)
self._remapImages()
self._extractLinks()
for elem in self.url_handler.elements:
try:
if base_path:
elem.base_path = base_path
elem.save_file()
except Exception as e:
LOGGER.error("Linked file generated an error upon saving!")
LOGGER.error(e)
pass
if reset_html:
self._lxml = None # reset the ElementTree
def save_complete(self):
"""Saves the complete html page to a file and also writes its linked files to the disk."""
self.save_assets(reset_html=False)
# new_file(self.url_obj.file_path, content=tostring(self.lxml, encoding=self.encoding))
self.lxml.getroottree().write(self.url_obj.file_path, method="html")
self._lxml = None # reset the tree
class Element(BaseParser):
"""An element of HTML.
:param element: The element from which to base the parsing upon.
:param url: The URL from which the HTML originated, used for ``absolute_links``.
:param default_encoding: Which encoding to default to.
"""
def __init__(self, element, url, default_encoding=None):
super(Element, self).__init__(element=element, url=url, default_encoding=default_encoding)
self.element = element
self.tag = element.tag
self.lineno = element.sourceline
self._attrs = None
def | __repr__ | identifier_name |
|
parsers.py | self.html"""
if not isinstance(HTML, str):
raise TypeError
self._html = HTML.decode(self.encoding, errors='xmlcharrefreplace')
def decode_html(self, html_string):
converted = UnicodeDammit(html_string)
if not converted.unicode_markup:
raise UnicodeDecodeError("Failed to detect encoding, tried [%s]", ','.join(converted.tried_encodings))
self.encoding = converted.original_encoding
return converted.unicode_markup
def encode(self, encoding=None, errors='xmlcharrefreplace'):
"""Returns the html of this :class: encoded with specified encoding."""
return self.html.encode(encoding=encoding, errors=errors)
@property
def encoding(self):
"""The encoding string to be used, extracted from the HTML and
:class:`HTMLResponse <HTMLResponse>` headers.
"""
if self._encoding:
return self._encoding
# Scan meta tags for charset.
if self._html:
self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
# Fall back to requests' detected encoding if decode fails.
try:
self.raw_html.decode(self.encoding, errors='replace')
except UnicodeDecodeError:
self._encoding = self.default_encoding
return self._encoding if self._encoding else self.default_encoding
@encoding.setter
def encoding(self, enc):
"""Property setter for self.encoding."""
self._encoding = enc
@property
def pq(self):
"""`PyQuery <https://pythonhosted.org/pyquery/>`_ representation
of the :class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._pq is None:
self._pq = PyQuery(self.lxml)
return self._pq
@property
def text(self):
"""The text content of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.pq.text()
@property
def full_text(self):
"""The full text content (including links) of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.lxml.text_content()
def find(self, selector="*", containing=None, clean=False, first=False,
_encoding=None):
"""Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
# Convert a single containing into a list.
if isinstance(containing, str):
containing = [containing]
if not isinstance(selector, str):
raise TypeError("Expected string, got %r" % type(selector))
encoding = _encoding or self.encoding
elements = [
Element(element=found, url=self.url, default_encoding=encoding)
for found in self.pq(selector)
]
if containing:
elements_copy = list(elements)
elements = []
for element in elements_copy:
if any([c.lower() in element.full_text.lower() for c in containing]):
elements.append(element)
elements.reverse()
# Sanitize the found HTML.
if clean:
elements_copy = list(elements)
elements = []
for element in elements_copy:
element.raw_html = lxml.html.tostring(cleaner.clean_html(element.lxml))
elements.append(element)
if first and len(elements) > 0:
return elements[0]
else:
return elements
def xpath(self, selector, clean=False, first=False, _encoding=None):
"""Given an XPath selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: XPath Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
If a sub-selector is specified (e.g. ``//a/@href``), a simple
list of results is returned.
See W3School's `XPath Examples
<https://www.w3schools.com/xml/xpath_examples.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
if not isinstance(selector, str):
raise TypeError("Expected string, got %r" % type(selector))
selected = self.lxml.xpath(selector)
elements = [
Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)
if not isinstance(selection, lxml.etree._ElementUnicodeResult) else str(selection)
for selection in selected
]
# Sanitize the found HTML.
if clean:
elements_copy = list(elements)
elements = []
for element in elements_copy:
element.raw_html = lxml.html.tostring(cleaner.clean_html(element.lxml))
elements.append(element)
if first and len(elements) > 0:
return elements[0]
else:
return elements
def search(self, template):
"""Search the :class:`Element <Element>` for the given Parse template.
:param template: The Parse template to use.
"""
if not isinstance(template, str):
raise TypeError("Expected string, got %r" % type(template))
return parse_search(template, self.html)
def search_all(self, template):
"""Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
"""
if not isinstance(template, str):
raise TypeError("Expected string, got %r" % type(template))
return [r for r in findall(template, self.html)]
class UrlHandler:
"""Handles different url types in the webpage."""
def __init__(self, base_url=None, base_path=None):
self.base_url = base_url
self.base_path = base_path
self._store = []
self._urlMap = {}
LOGGER.info("Url Handler initiated with base_url: %s and base_path: %s" % (base_url, base_path))
@property
def elements(self):
return self._store
def _base_handler(self, elem, attr, url, pos):
| obj.rel_path = pathname2url(relate(obj.file_path, self.base_path))
self._store.append(obj)
return obj
except Exception as e:
LOGGER.error(e)
LOGGER.error('Exception occurred while creating an object for %s' % url)
return None # return unmodified
def default_handler(self, elem, attr, url, pos):
"""Handles any link type <a> <link> <script> <style> <style url>.
Note: Default handler function structures makes use of .rel_path attribute
which is completely internal and any usage depending on this attribute
may not work properly.
"""
obj = self._base_handler(elem, attr, url, pos)
if obj is None:
return
if attr is None:
new = elem.text[:pos] + obj.rel_path + elem.text[len(url) + pos:]
elem.text = new
else:
cur = elem.get(attr)
if not pos and len(cur) == len(url):
new = obj.rel_path # most common case
else:
new = cur[:pos] + obj.rel_path + cur[pos + len(url):]
elem.set(attr, new)
LOGGER.info("Remapped url of the file: %s to the path: %s " % (url, obj.rel_path))
self._urlMap[url] = obj.rel_path
return obj
def handle(self, elem, attr, url, pos):
"""Base handler function."""
if url.startswith(u'#') or url.startswith(u'java') or \
url.startswith(u'data') or not url.strip('/') or not url.strip():
return url
if not self.base_url:
raise AttributeError("Url attributes are unset!")
| """Can handle <img>, <link>, <script> :class: `lxml.html.Element` object."""
LOGGER.debug("Handling url %s" % url)
if url.startswith(u"#") or url.startswith(u"data:") or url.startswith(u'javascript'):
return None # not valid link
try:
if elem.tag == 'link':
obj = LinkTag(url)
elif elem.tag == 'script':
obj = ScriptTag(url)
elif elem.tag == 'img':
obj = ImgTag(url)
elif elem.tag == 'a':
obj = AnchorTag(url)
else:
obj = Asset(url)
obj.base_url = self.base_url
obj.tag = attr | identifier_body |
parsers.py | self.html"""
if not isinstance(HTML, str):
raise TypeError
self._html = HTML.decode(self.encoding, errors='xmlcharrefreplace')
def decode_html(self, html_string):
converted = UnicodeDammit(html_string)
if not converted.unicode_markup:
raise UnicodeDecodeError("Failed to detect encoding, tried [%s]", ','.join(converted.tried_encodings))
self.encoding = converted.original_encoding
return converted.unicode_markup
def encode(self, encoding=None, errors='xmlcharrefreplace'):
"""Returns the html of this :class: encoded with specified encoding."""
return self.html.encode(encoding=encoding, errors=errors)
@property
def encoding(self):
"""The encoding string to be used, extracted from the HTML and
:class:`HTMLResponse <HTMLResponse>` headers.
"""
if self._encoding:
|
# Scan meta tags for charset.
if self._html:
self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
# Fall back to requests' detected encoding if decode fails.
try:
self.raw_html.decode(self.encoding, errors='replace')
except UnicodeDecodeError:
self._encoding = self.default_encoding
return self._encoding if self._encoding else self.default_encoding
@encoding.setter
def encoding(self, enc):
"""Property setter for self.encoding."""
self._encoding = enc
@property
def pq(self):
"""`PyQuery <https://pythonhosted.org/pyquery/>`_ representation
of the :class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._pq is None:
self._pq = PyQuery(self.lxml)
return self._pq
@property
def text(self):
"""The text content of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.pq.text()
@property
def full_text(self):
"""The full text content (including links) of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.lxml.text_content()
def find(self, selector="*", containing=None, clean=False, first=False,
_encoding=None):
"""Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
# Convert a single containing into a list.
if isinstance(containing, str):
containing = [containing]
if not isinstance(selector, str):
raise TypeError("Expected string, got %r" % type(selector))
encoding = _encoding or self.encoding
elements = [
Element(element=found, url=self.url, default_encoding=encoding)
for found in self.pq(selector)
]
if containing:
elements_copy = list(elements)
elements = []
for element in elements_copy:
if any([c.lower() in element.full_text.lower() for c in containing]):
elements.append(element)
elements.reverse()
# Sanitize the found HTML.
if clean:
elements_copy = list(elements)
elements = []
for element in elements_copy:
element.raw_html = lxml.html.tostring(cleaner.clean_html(element.lxml))
elements.append(element)
if first and len(elements) > 0:
return elements[0]
else:
return elements
def xpath(self, selector, clean=False, first=False, _encoding=None):
"""Given an XPath selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: XPath Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
If a sub-selector is specified (e.g. ``//a/@href``), a simple
list of results is returned.
See W3School's `XPath Examples
<https://www.w3schools.com/xml/xpath_examples.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
if not isinstance(selector, str):
raise TypeError("Expected string, got %r" % type(selector))
selected = self.lxml.xpath(selector)
elements = [
Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)
if not isinstance(selection, lxml.etree._ElementUnicodeResult) else str(selection)
for selection in selected
]
# Sanitize the found HTML.
if clean:
elements_copy = list(elements)
elements = []
for element in elements_copy:
element.raw_html = lxml.html.tostring(cleaner.clean_html(element.lxml))
elements.append(element)
if first and len(elements) > 0:
return elements[0]
else:
return elements
def search(self, template):
"""Search the :class:`Element <Element>` for the given Parse template.
:param template: The Parse template to use.
"""
if not isinstance(template, str):
raise TypeError("Expected string, got %r" % type(template))
return parse_search(template, self.html)
def search_all(self, template):
"""Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
"""
if not isinstance(template, str):
raise TypeError("Expected string, got %r" % type(template))
return [r for r in findall(template, self.html)]
class UrlHandler:
"""Handles different url types in the webpage."""
def __init__(self, base_url=None, base_path=None):
self.base_url = base_url
self.base_path = base_path
self._store = []
self._urlMap = {}
LOGGER.info("Url Handler initiated with base_url: %s and base_path: %s" % (base_url, base_path))
@property
def elements(self):
return self._store
def _base_handler(self, elem, attr, url, pos):
"""Can handle <img>, <link>, <script> :class: `lxml.html.Element` object."""
LOGGER.debug("Handling url %s" % url)
if url.startswith(u"#") or url.startswith(u"data:") or url.startswith(u'javascript'):
return None # not valid link
try:
if elem.tag == 'link':
obj = LinkTag(url)
elif elem.tag == 'script':
obj = ScriptTag(url)
elif elem.tag == 'img':
obj = ImgTag(url)
elif elem.tag == 'a':
obj = AnchorTag(url)
else:
obj = Asset(url)
obj.base_url = self.base_url
obj.tag = attr
obj.rel_path = pathname2url(relate(obj.file_path, self.base_path))
self._store.append(obj)
return obj
except Exception as e:
LOGGER.error(e)
LOGGER.error('Exception occurred while creating an object for %s' % url)
return None # return unmodified
def default_handler(self, elem, attr, url, pos):
"""Handles any link type <a> <link> <script> <style> <style url>.
Note: Default handler function structures makes use of .rel_path attribute
which is completely internal and any usage depending on this attribute
may not work properly.
"""
obj = self._base_handler(elem, attr, url, pos)
if obj is None:
return
if attr is None:
new = elem.text[:pos] + obj.rel_path + elem.text[len(url) + pos:]
elem.text = new
else:
cur = elem.get(attr)
if not pos and len(cur) == len(url):
new = obj.rel_path # most common case
else:
new = cur[:pos] + obj.rel_path + cur[pos + len(url):]
elem.set(attr, new)
LOGGER.info("Remapped url of the file: %s to the path: %s " % (url, obj.rel_path))
self._urlMap[url] = obj.rel_path
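# e.g. an <img> whose src is 'https://example.com/img/logo.png' (hypothetical url) gets its
# attribute rewritten to the asset's computed relative path, and the mapping is kept in _urlMap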
return obj
def handle(self, elem, attr, url, pos):
"""Base handler function."""
if url.startswith(u'#') or url.startswith(u'java') or \
url.startswith(u'data') or not url.strip('/') or not url.strip():
return url
if not self.base_url:
raise AttributeError("Url attributes are unset!")
| return self._encoding | conditional_block |
blockchain1.py | requests
from uuid import uuid4
from urllib.parse import urlparse
# Building a Blockchain
class Blockchain:
def __init__(self):
self.chain = [] # our main block chain
# now we will create the list of transactions which will record all the transactions
self.transactions = []
# create_block is used to create a block in the blockchain, so it is executed only when a block is mined (meaning it has the winning proof_of_work=proof); proof=0 and previous_hash='0' for the genesis block
self.create_block(proof=0, previous_hash='0')
# nodes will contain the unique identifiers of the addresses of all nodes in the p2p network
self.nodes = set() # we have taken set() instead of list because the addresses are randomly generated by uuid4, and a set avoids duplicates
# part1
def create_block(self, proof, previous_hash):
block = { # dictionary of python data structure
'index': len(self.chain)+1,
'timestamp': str(datetime.datetime.now()),
'proof': proof, # works like the nonce of a block; the search stops when we reach a hash at or below the target
'previous_hash': previous_hash,
'transactions': self.transactions}
self.transactions = [] # this needs to be done because we can't have duplicate lists of transactions in further blocks, so empty the transactions that have already been added to the block
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(
str(new_proof**2-previous_proof**2).encode()).hexdigest()
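# requiring four leading hex zeroes means roughly 1 in 16**4 (= 65,536) candidate proofs
# succeeds on average, which is what makes mining take work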
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof # it is just a number corresponding to the puzzle being solved: the winning proof gives a hash with 4 leading zeroes
# the hash of a block is created after the block is generated; that's why we only use previous_hash, because it is already available
def hash(self, block):
encoded_block = json.dumps(block, sort_keys=True).encode()
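# sort_keys=True gives a canonical key order, so the same block always serialises to the
# same bytes and therefore always produces the same SHA-256 hash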
return hashlib.sha256(encoded_block).hexdigest()
def is_chain_valid(self, chain):
# reference of first block stored genesis block
|
# functions used to add the transactions to the list
def add_transaction(self, senders, receiver, amount):
self.transactions.append({
'senders': senders,
'receiver': receiver,
'amount': amount
})
previous_block = self.get_previous_block()
# +1 because transactions are added before mining, so the new block's index will be one more than the previous block's
return previous_block['index']+1
# part-1 ends
# part-3--> dealing with the decentralized application and transactions
# this function allows us to add different nodes to the chain
def add_node(self, address): # generating the decentralized application
# we need to parse the url before adding it
parsed_url = urlparse(address)
# .netloc gives us the unique identifier of the node address removing the unrequired part from it
self.nodes.add(parsed_url.netloc)
# this function help us to solve the problem of consensus protocols (competing chain)
def replace_chain(self):
# this variable help us to find the length of longest chain among different network
max_length = len(self.chain)
longest_chain = None
network = self.nodes # this variable will hold the address of all the nodes in network
for node in network:
# we know the nodes set will hold only the netloc value of each node, so we are going to use that to make a request to that node and check its chain length
# using the requests library we make a request to that node's address (f'http://{node}/get_chain' --> e.g. f'http://127.0.0.1:5000/get_chain')
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200: # this is the code that checks something was received in the request
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
# this will happen in every node of network
if longest_chain:
# if this chain is shorter than another, it will be updated
self.chain = longest_chain
return True
# if this chain is already the longest in the network then return False and make no update
return False
# part-3 ends
# Mining our Blockchain
app = Flask(__name__)
# Creating a Blockchain
# creating the instance of blockchain
blockchain = Blockchain()
# Mining the blockchain
# create a random and unique address for the node on port 5000
# this is the address used to send the whale coin to the miner when the miner mines a whale coin
node_address = str(uuid4()).replace('-', '')
# part-2
@app.route('/mine_block', methods=['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
# miners price
# usually the receiver's public address is created when the user generates the wallet, and the mining pool sends the coin after mining the block to the miner address present in the bat file, which is edited after downloading the software
blockchain.add_transaction(node_address, 'Bhavjot', 1)
# when create_block is called, all the transactions performed will be inserted inside the newly created block, and the transactions list is then reset to [] to avoid duplication
block = blockchain.create_block(proof, previous_hash)
response = {'message': 'Congratulations, you just mined a block! 😈😈😈😈😈🤓🤓🤓', # response is a json data
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
'transactions': block['transactions']}
return jsonify(response), 200
# getting all blocks in chain
@app.route('/get_chain', methods=['GET'])
def get_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain)
}
return jsonify(response), 200
# custom message
@app.route('/', methods=['GET'])
def custom_message():
response = {
'message': 'Congratulations you are on Whalecoin 🐳🐳🐳🐳🐳🐳'
}
return jsonify(response), 200
# part-2 ends
# creating the transactions
@app.route('/add_transactions', methods=['POST'])
def add_transaction():
# this will help us to extract the POST request body made in Postman, like req.params.name in Express
json = request.get_json()
# this will help us to check whether all the parameters required for adding the transaction are present
transaction_keys = ['sender', 'receiver', 'amount']
if not all(key in json for key in transaction_keys):
return 'Some elements of the transaction are missing', 400
index = blockchain.add_transaction(
json['sender'], json['receiver'], json['amount'])
# when the block is mined, all the transactions in the list are added to the block
response = {'message': f'This transaction will be added to Block {index}'}
return jsonify(response), 201
@app.route('/connect_node', methods=['POST'])
def connect_node():
json = request.get_json() # we will get request message send from postman
# {'nodes':['http://127.0.0.1:5000','http://127.0.0.1:5001','http://127.0.0.1:5003',...]} when adding nodes using add_nodes 127.0.0.1:5001 it will be extracted using netloc
nodes = json.get('nodes')
if nodes is None:
return "No node", 400
for node in nodes:
blockchain.add_node(node) # add our nodes to network
response = {'message': 'All the nodes are now connected. The Whalecoin 🐳🐳🐳🐳 | previous_block = chain[0]
block_index = 1 # required for iteration
while block_index < len(chain):
block = chain[block_index] # current block
# checking whether the reference stored in the previous_hash property matches the hash of the previous block computed with the hash function
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
# verifying the proof of the block using the current proof and the previous proof; this is easier than creating the proof
hash_operation = hashlib.sha256(
str(proof**2 - previous_proof**2).encode()).hexdigest()
# the more zeroes required, the harder it is to mine the block
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True | identifier_body |
blockchain1.py | requests
from uuid import uuid4
from urllib.parse import urlparse
# Building a Blockchain
class Blockchain:
def __init__(self):
self.chain = [] # our main block chain
# now we will create the list of transactions which will record all the transactions
self.transactions = []
# create_block is used to create a block in the blockchain, so it is executed only when a block is mined (meaning it has the winning proof_of_work=proof); proof=0 and previous_hash='0' for the genesis block
self.create_block(proof=0, previous_hash='0')
# nodes will contain the unique identifiers of the addresses of all nodes in the p2p network
self.nodes = set() # we have taken set() instead of list because the addresses are randomly generated by uuid4, and a set avoids duplicates
# part1
def create_block(self, proof, previous_hash):
block = { # dictionary of python data structure
'index': len(self.chain)+1,
'timestamp': str(datetime.datetime.now()),
'proof': proof, # works like the nonce of a block; the search stops when we reach a hash at or below the target
'previous_hash': previous_hash,
'transactions': self.transactions}
self.transactions = [] # this needs to be done because we can't have duplicate lists of transactions in further blocks, so empty the transactions that have already been added to the block
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(
str(new_proof**2-previous_proof**2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof # it is just a number corresponding to the puzzle being solved: the winning proof gives a hash with 4 leading zeroes
# the hash of a block is created after the block is generated; that's why we only use previous_hash, because it is already available
def hash(self, block):
encoded_block = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(encoded_block).hexdigest()
def is_chain_valid(self, chain):
# reference of first block stored genesis block
previous_block = chain[0]
block_index = 1 # required for iteration
while block_index < len(chain):
block = chain[block_index] # current block
# checking whether the reference stored in the previous_hash property matches the hash of the previous block computed with the hash function
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
# verifying the proof of the block using the current proof and the previous proof; this is easier than creating the proof
hash_operation = hashlib.sha256(
str(proof**2 - previous_proof**2).encode()).hexdigest()
# the more zeroes required, the harder it is to mine the block
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
# functions used to add the transactions to the list
def add_transaction(self, senders, receiver, amount):
self.transactions.append({
'senders': senders,
'receiver': receiver,
'amount': amount
})
previous_block = self.get_previous_block()
# +1 because transactions are added before mining, so the new block's index will be one more than the previous block's
return previous_block['index']+1
# part-1 ends
# part-3--> dealing with the decentralized application and transactions
# this function allows us to add different nodes to the chain
def add_node(self, address): # generating the decentralized application
# we need to parse the url before adding it
parsed_url = urlparse(address)
# .netloc gives us the unique identifier of the node address removing the unrequired part from it
self.nodes.add(parsed_url.netloc)
# this function help us to solve the problem of consensus protocols (competing chain)
def | (self):
# this variable help us to find the length of longest chain among different network
max_length = len(self.chain)
longest_chain = None
network = self.nodes # this variable will hold the address of all the nodes in network
for node in network:
# we know the nodes set will hold only the netloc value of each node, so we are going to use that to make a request to that node and check its chain length
# using the requests library we make a request to that node's address (f'http://{node}/get_chain' --> e.g. f'http://127.0.0.1:5000/get_chain')
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200: # this is the code that checks something was received in the request
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
# this will happen in every node of network
if longest_chain:
# if this chain is shorter than another, it will be updated
self.chain = longest_chain
return True
# if this chain is already the longest in the network then return False and make no update
return False
# part-3 ends
# Mining our Blockchain
app = Flask(__name__)
# Creating a Blockchain
# creating the instance of blockchain
blockchain = Blockchain()
# Mining the blockchain
# create a random and unique address for the node on port 5000
# this is the address used to send the whale coin to the miner when the miner mines a whale coin
node_address = str(uuid4()).replace('-', '')
# part-2
@app.route('/mine_block', methods=['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
# miners price
# usually the receiver's public address is created when the user generates the wallet, and the mining pool sends the coin after mining the block to the miner address present in the bat file, which is edited after downloading the software
blockchain.add_transaction(node_address, 'Bhavjot', 1)
# when create_block is called, all the transactions performed will be inserted inside the newly created block, and the transactions list is then reset to [] to avoid duplication
block = blockchain.create_block(proof, previous_hash)
response = {'message': 'Congratulations, you just mined a block! 😈😈😈😈😈🤓🤓🤓', # response is a json data
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
'transactions': block['transactions']}
return jsonify(response), 200
# getting all blocks in chain
@app.route('/get_chain', methods=['GET'])
def get_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain)
}
return jsonify(response), 200
# custom message
@app.route('/', methods=['GET'])
def custom_message():
response = {
'message': 'Congratulations you are on Whalecoin 🐳🐳🐳🐳🐳🐳'
}
return jsonify(response), 200
# part-2 ends
# creating the transactions
@app.route('/add_transactions', methods=['POST'])
def add_transaction():
# this will help us to extract the POST request body made in Postman, like req.params.name in Express
json = request.get_json()
# this will help us to check whether all the parameters required for adding the transaction are present
transaction_keys = ['sender', 'receiver', 'amount']
if not all(key in json for key in transaction_keys):
return 'Some elements of the transaction are missing', 400
index = blockchain.add_transaction(
json['sender'], json['receiver'], json['amount'])
# when the block is mined, all the transactions in the list are added to the block
response = {'message': f'This transaction will be added to Block {index}'}
return jsonify(response), 201
@app.route('/connect_node', methods=['POST'])
def connect_node():
json = request.get_json() # we will get request message send from postman
# {'nodes':['http://127.0.0.1:5000','http://127.0.0.1:5001','http://127.0.0.1:5003',...]} when adding nodes using add_nodes 127.0.0.1:5001 it will be extracted using netloc
nodes = json.get('nodes')
if nodes is None:
return "No node", 400
for node in nodes:
blockchain.add_node(node) # add our nodes to network
response = {'message': 'All the nodes are now connected. The Whalecoin 🐳🐳🐳 | replace_chain | identifier_name |
blockchain1.py | .datetime.now()),
'proof': proof, # works like the nonce of a block; the search stops when we reach a hash at or below the target
'previous_hash': previous_hash,
'transactions': self.transactions}
self.transactions = [] # this needs to be done because we can't have duplicate lists of transactions in further blocks, so empty the transactions that have already been added to the block
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(
str(new_proof**2-previous_proof**2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof # it is just a number corresponding to the puzzle being solved: the winning proof gives a hash with 4 leading zeroes
# the hash of a block is created after the block is generated; that's why we only use previous_hash, because it is already available
def hash(self, block):
encoded_block = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(encoded_block).hexdigest()
def is_chain_valid(self, chain):
# reference of first block stored genesis block
previous_block = chain[0]
block_index = 1 # required for iteration
while block_index < len(chain):
block = chain[block_index] # current block
# checking whether the reference stored in the previous_hash property matches the hash of the previous block computed with the hash function
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
# verifying the proof of the block using the current proof and the previous proof; this is easier than creating the proof
hash_operation = hashlib.sha256(
str(proof**2 - previous_proof**2).encode()).hexdigest()
# the more zeroes required, the harder it is to mine the block
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
# functions used to add the transactions to the list
def add_transaction(self, senders, receiver, amount):
self.transactions.append({
'senders': senders,
'receiver': receiver,
'amount': amount
})
previous_block = self.get_previous_block()
# +1 because transactions are added before mining, so the new block's index will be one more than the previous block's
return previous_block['index']+1
# part-1 ends
# part-3--> dealing with the decentralized application and transactions
# this function allows us to add different nodes to the chain
def add_node(self, address): # generating the decentralized application
# we need to parse the url before adding it
parsed_url = urlparse(address)
# .netloc gives us the unique identifier of the node address removing the unrequired part from it
self.nodes.add(parsed_url.netloc)
# this function help us to solve the problem of consensus protocols (competing chain)
def replace_chain(self):
# this variable help us to find the length of longest chain among different network
max_length = len(self.chain)
longest_chain = None
network = self.nodes # this variable will hold the address of all the nodes in network
for node in network:
# we know the nodes set will hold only the netloc value of each node, so we are going to use that to make a request to that node and check its chain length
# using the requests library we make a request to that node's address (f'http://{node}/get_chain' --> e.g. f'http://127.0.0.1:5000/get_chain')
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200: # this is the code that checks something was received in the request
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
# this will happen in every node of network
if longest_chain:
# if this chain is shorter than another, it will be updated
self.chain = longest_chain
return True
# if this chain is already the longest in the network then return False and make no update
return False
# part-3 ends
# Mining our Blockchain
app = Flask(__name__)
# Creating a Blockchain
# creating the instance of blockchain
blockchain = Blockchain()
# Mining the blockchain
# create a random and unique address for the node on port 5000
# this is the address used to send the whale coin to the miner when the miner mines a whale coin
node_address = str(uuid4()).replace('-', '')
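# str(uuid4()) is a 36-character string containing 4 dashes; stripping the dashes leaves a 32-character hex identifier for this node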
# part-2
@app.route('/mine_block', methods=['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
# miners price
# usually the receiver's public address is created when the user generates the wallet, and the mining pool sends the coin after mining the block to the miner address present in the bat file, which is edited after downloading the software
blockchain.add_transaction(node_address, 'Bhavjot', 1)
# when create_block is called, all the transactions performed will be inserted inside the newly created block, and the transactions list is then reset to [] to avoid duplication
block = blockchain.create_block(proof, previous_hash)
response = {'message': 'Congratulations, you just mined a block! 😈😈😈😈😈🤓🤓🤓', # response is a json data
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
'transactions': block['transactions']}
return jsonify(response), 200
# getting all blocks in chain
@app.route('/get_chain', methods=['GET'])
def get_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain)
}
return jsonify(response), 200
# custom message
@app.route('/', methods=['GET'])
def custom_message():
response = {
'message': 'Congratulations you are on Whalecoin 🐳🐳🐳🐳🐳🐳'
}
return jsonify(response), 200
# part-2 ends
# creating the transactions
@app.route('/add_transactions', methods=['POST'])
def add_transaction():
# this will help us to extract the POST request body made in Postman, like req.params.name in Express
json = request.get_json()
# this will help us to check whether all the parameters required for adding the transaction are present
transaction_keys = ['sender', 'receiver', 'amount']
if not all(key in json for key in transaction_keys):
return 'Some elements of the transaction are missing', 400
index = blockchain.add_transaction(
json['sender'], json['receiver'], json['amount'])
# when the block is mined, all the transactions in the list are added to the block
response = {'message': f'This transaction will be added to Block {index}'}
return jsonify(response), 201
@app.route('/connect_node', methods=['POST'])
def connect_node():
json = request.get_json() # we will get request message send from postman
# {'nodes':['http://127.0.0.1:5000','http://127.0.0.1:5001','http://127.0.0.1:5003',...]} when adding nodes using add_nodes 127.0.0.1:5001 it will be extracted using netloc
nodes = json.get('nodes')
if nodes is None:
return "No node", 400
for node in nodes:
blockchain.add_node(node) # add our nodes to network
response = {'message': 'All the nodes are now connected. The Whalecoin 🐳🐳🐳🐳🐳🐳 Blockchain now contains the following nodes:',
'total_nodes': list(blockchain.nodes)}
return jsonify(response), 201
# Replacing the chain by the longest chain if needed
# this function will be present in every node of the blockchain and is checked regularly so that the node remains updated with the other chains by hitting the replace_chain URL
@ app.route('/replace_chain', methods=['GET'])
def replace_chain():
# using the above defined function in class
is_chain_replaced = blockchain.replace_chain()
if is_chain_replaced: # means the current blockchain was the shortest one and it is replaced
response = {'message': 'The nodes had different chains so the chain was replaced by the longest one.',
'new_chain': blockchain.chain}
else: # means the current blockchain was not the shortest one and it is not replaced
response = {'message': 'All good. The chain is the largest o | ne.',
'actual_chain': blockchain.chain}
return jsonify(response), 200
# Running the app
# hos | conditional_block |
|
blockchain1.py | requests
from uuid import uuid4
from urllib.parse import urlparse
# Building a Blockchain
class Blockchain:
def __init__(self):
self.chain = [] # our main block chain
# now we will create the list of transactions which will record all the transactions
self.transactions = []
# create_block is used to create a block in the blockchain, so it is executed only when a block is mined (meaning it has the winning proof_of_work=proof); proof=0 and previous_hash='0' for the genesis block
self.create_block(proof=0, previous_hash='0')
# nodes will contain the unique identifiers of the addresses of all nodes in the p2p network
self.nodes = set() # we have taken set() instead of list because the addresses are randomly generated by uuid4, and a set avoids duplicates
# part1
def create_block(self, proof, previous_hash):
block = { # dictionary of python data structure
'index': len(self.chain)+1,
'timestamp': str(datetime.datetime.now()),
'proof': proof, # works like the nonce of a block; the search stops when we reach a hash at or below the target
'previous_hash': previous_hash,
'transactions': self.transactions}
self.transactions = [] # this needs to be done because we can't have duplicate lists of transactions in further blocks, so empty the transactions that have already been added to the block
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(
str(new_proof**2-previous_proof**2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof # it is just a number corresponding to the puzzle being solved: the winning proof gives a hash with 4 leading zeroes
# the hash of a block is created after the block is generated; that's why we only use previous_hash, because it is already available
def hash(self, block):
encoded_block = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(encoded_block).hexdigest()
def is_chain_valid(self, chain):
# reference of first block stored genesis block
previous_block = chain[0]
block_index = 1 # required for iteration
while block_index < len(chain):
block = chain[block_index] # current block
# checking whether the reference stored in the previous_hash property matches the hash of the previous block computed with the hash function
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
# verifying the proof of the block using the current proof and the previous proof; this is easier than creating the proof
hash_operation = hashlib.sha256(
str(proof**2 - previous_proof**2).encode()).hexdigest()
# the more zeroes required, the harder it is to mine the block
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
# functions used to add the transactions to the list
def add_transaction(self, senders, receiver, amount):
self.transactions.append({
'senders': senders,
'receiver': receiver,
'amount': amount
})
previous_block = self.get_previous_block()
# +1 because transactions are added before mining, so the new block's index will be one more than the previous block's
return previous_block['index']+1
# part-1 ends
# part-3--> dealing with the decentralized application and transactions
# this function allows us to add different nodes to the chain
def add_node(self, address): # generating the decentralized application
# we need to parse the url before adding it
parsed_url = urlparse(address)
# .netloc gives us the unique identifier of the node address removing the unrequired part from it
self.nodes.add(parsed_url.netloc)
# this function help us to solve the problem of consensus protocols (competing chain)
def replace_chain(self):
# this variable help us to find the length of longest chain among different network
max_length = len(self.chain)
longest_chain = None
network = self.nodes # this variable will hold the address of all the nodes in network
for node in network:
# we know the nodes set will hold only the netloc value of each node, so we are going to use that to make a request to that node and check its chain length
# using the requests library we make a request to that node's address (f'http://{node}/get_chain' --> e.g. f'http://127.0.0.1:5000/get_chain')
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200: # this is the code that checks something was received in the request
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
# this will happen in every node of network
if longest_chain:
# if this chain is shorter than another, it will be updated
self.chain = longest_chain
return True
# if this chain is already the longest in the network then return False and make no update
return False
# part-3 ends
# Mining our Blockchain
app = Flask(__name__)
# Creating a Blockchain
# creating the instance of blockchain
blockchain = Blockchain()
# Mining the blockchain
# create a random and unique address for the node on port 5000
# this is the address used to send the whale coin to the miner when the miner mines a whale coin
node_address = str(uuid4()).replace('-', '')
# part-2
@app.route('/mine_block', methods=['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
# miners price
# usually the receiver's public address is created when the user generates the wallet, and the mining pool sends the coin after mining the block to the miner address present in the bat file, which is edited after downloading the software
blockchain.add_transaction(node_address, 'Bhavjot', 1)
# when create_block is called, all the transactions performed will be inserted inside the newly created block, and the transactions list is then reset to [] to avoid duplication
block = blockchain.create_block(proof, previous_hash)
response = {'message': 'Congratulations, you just mined a block! 😈😈😈😈😈🤓🤓🤓', # response is a json data
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
'transactions': block['transactions']}
return jsonify(response), 200
# getting all blocks in chain
@app.route('/get_chain', methods=['GET'])
def get_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain)
}
return jsonify(response), 200
# custom message
@app.route('/', methods=['GET'])
def custom_message():
response = {
'message': 'Congratulations you are on Whalecoin 🐳🐳🐳🐳🐳🐳'
}
return jsonify(response), 200
# part-2 ends
# creating the transactions
@app.route('/add_transactions', methods=['POST'])
def add_transaction():
# this will help us to extract te post request made in postman like req.params.name in express
json = request.get_json()
# this will hep us to check that all the parameters are present or not for adding the transactions
transaction_keys = ['sender', 'receiver', 'amount']
if not all(key in json for key in transaction_keys):
return 'Some elements of the transaction are missing', 400
index = blockchain.add_transaction(
json['sender'], json['receiver'], json['amount'])
# when the block is mined all the transations in lists is added to block
response = {'message': f'This transaction will be added to Block {index}'}
return jsonify(response), 201
@app.route('/connect_node', methods=['POST'])
def connect_node():
json = request.get_json() # we will get request message send from postman
# {'nodes':['http://127.0.0.1:5000','http://127.0.0.1:5001','http://127.0.0.1:5003',...]} when adding nodes using add_nodes 127.0.0.1:5001 it will be extracted using netloc
nodes = json.get('nodes')
if nodes is None:
return "No node", 400
for node in nodes: | response = {'message': 'All the nodes are now connected. The Whalecoin 🐳🐳🐳🐳🐳 | blockchain.add_node(node) # add our nodes to network | random_line_split |
AgentConsole.controller.js | 151.207193));
AgentConController.map.setZoom(18);
marker = new google.maps.Marker({
// The below line is equivalent to writing:
// position: new google.maps.LatLng(-34.397, 150.644)
animation: google.maps.Animation.DROP,
position: {
lat: -33.862995,
lng: 151.207193
},
map: AgentConController.map
});
}
}
});
}
});
console.log("Selected language: " + reservation.task.attributes.selected_language);
console.log("-----");
var oModelExcepForCrate = new JSONModel({
"histories": {
"labels": ['March 2017', 'April 2017', 'May 2017', 'June 2017', 'July 2017', 'August 2017'],
"datasets": [{
"label": "Low",
"backgroundColor": '#ADD8E6',
"data": [10, 10, 9, 8, 5, 2]
}, {
"label": "Medium",
"backgroundColor": '#98FB98',
"data": [8, 16, 7, 5, 5, 7]
}, {
"label": "High",
"backgroundColor": '#FFA07A',
"data": [2, 3, 1, 5, 3, 5]
}]
},
options: {
scales: {
yAxes: [{
ticks: {
beginAtZero: true
}
}]
}
}
});
AgentConController.getOwnerComponent().setModel(oModelExcepForCrate, "custHistory");
AgentConController.getOwnerComponent().getModel("custHistory").refresh();
var data = {
openIssues: [{
TicketNo: "1001",
TicketDesc: "Device Damaged",
Status: "Open",
Priority: "1",
StartTime: "08-27-2017"
}, {
TicketNo: "1002",
TicketDesc: "Device making noise",
Status: "In Progress",
Priority: "2",
StartTime: "08-26-2017"
}, {
TicketNo: "1003",
TicketDesc: "Wrong Product Delivered",
Status: "Open",
Priority: "2",
StartTime: "08-26-2017"
}, {
TicketNo: "1004",
TicketDesc: "No Manual provided",
Status: "In Progress",
Priority: "3",
StartTime: "08-25-2017"
}, {
TicketNo: "1005",
TicketDesc: "Heating issue",
Status: "Open",
Priority: "1",
StartTime: "08-27-2017"
}]
};
AgentConController.getOwnerComponent().setModel(new JSONModel(data), "openIssuesModel");
AgentConController.getOwnerComponent().getModel("openIssuesModel").refresh();
});
worker.on("reservation.accepted", function(reservation) {
that.logger("Reservation " + reservation.sid + " accepted!");
//twiml.record({transcribe: true, maxLength: 30});
});
worker.on("reservation.rejected", function(reservation) {
that.logger("Reservation " + reservation.sid + " rejected!");
});
worker.on("reservation.timeout", function(reservation) {
that.logger("Reservation " + reservation.sid + " timed out!");
});
worker.on("reservation.canceled", function(reservation) {
that.logger("Reservation " + reservation.sid + " canceled!");
});
},
/* Hook up the agent Activity buttons to Worker.js */
bindAgentActivityButtons: function() {
// Fetch the full list of available Activities from TaskRouter. Store each
// ActivitySid against the matching Friendly Name
var activitySids = {};
worker.activities.fetch(function(error, activityList) {
var activities = activityList.data;
var i = activities.length;
while (i--) {
activitySids[activities[i].friendlyName] = activities[i].sid;
}
});
/* For each button of class 'change-activity' in our Agent UI, look up the
ActivitySid corresponding to the Friendly Name in the button’s next-activity
data attribute. Use Worker.js to transition the agent to that ActivitySid
when the button is clicked.*/
var elements = document.getElementsByClassName('change-activity');
var i = elements.length;
while (i--) {
elements[i].onclick = function() {
var nextActivity = this.dataset.nextActivity;
var nextActivitySid = activitySids[nextActivity];
worker.update("ActivitySid", nextActivitySid);
}
}
},
onSetBusy: function() {
worker.update("ActivitySid", "WA9dae4784b2317a747d2a71cc7a4a1466");
},
onSetIdle: function() {
worker.update("ActivitySid", "WA8f25e4d7b5024db83aef8843b30a670a");
},
onBase64: function() {
var reader = new FileReader();
reader.onload = function(readerEvt) {
var binaryString = readerEvt.target.result;
console.log(btoa(binaryString));
};
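// note: FileReader.readAsBinaryString expects a Blob/File object; passing a path string like
// the one below will not actually read the file - the path would first need to be fetched into a Blob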
reader.readAsBinaryString("model/sample.flac")
},
/* Update the UI to reflect a change in Activity */
agentActivityChanged: function(activity) {
this.hideAgentActivities();
this.showAgentActivity(activity);
},
hideAgentActivities: function() {
var elements = document.getElementsByClassName('agent-activity');
var i = elements.length;
while (i--) {
//elements[i].style.display = 'none';
}
},
showAgentActivity: function(activity) {
activity = activity.toLowerCase();
var elements = document.getElementsByClassName(('agent-activity ' + activity));
//elements.item(0).style.display = 'block';
},
/* Other stuff */
logger: function(message) {
// var log = document.getElementById('log');
// log.value += "\n> " + message;
// log.scrollTop = log.scrollHeight;
console.log(message);
},
/*End Twilio Functions*/
/**
* Similar to onAfterRendering, but this hook is invoked before the controller's View is re-rendered
* (NOT before the first rendering! onInit() is used for that one!).
* @memberOf com.deloitte.smartservice.SMARTSERVICE.view.AgentConsole
*/
// onBeforeRendering: function() {
//
// },
/**
* Called when the View has been rendered (so its HTML is part of the document). Post-rendering manipulations of the HTML could be done here.
* This hook is the same one that SAPUI5 controls get after being rendered.
* @memberOf com.deloitte.smartservice.SMARTSERVICE.view.AgentConsole
*/
// onAfterRendering: function() {
//
// },
/**
* Called when the Controller is destroyed. Use this one to free resources and finalize activities.
* @memberOf com.deloitte.smartservice.SMARTSERVICE.view.AgentConsole
*/
// onExit: function() {
//
// }
onPatternMatch: function(oEvent) {
if (oEvent.getParameter("name") === "AgentConsole") {
| var today = new Date();
AgentConController.getView().byId("dr_fromTo").setSecondDateValue(today);
var oneWeekAgo = new Date();
oneWeekAgo.setDate(oneWeekAgo.getDate() - 7);
AgentConController.getView().byId("dr_fromTo").setDateValue(oneWeekAgo);
AgentConController.initialized = true;
AgentConController.getView().byId("map_canvas").setVisible(false);
AgentConController.getView().byId("map_canvas").setVisible(true);
AgentConController.getView().byId("map_canvas").onAfterRendering = function() {
AgentConController.geocoder = new google.maps.Geocoder();
window.mapOptions = {
center: new google.maps.LatLng(37.687878, -122.471780),
zoom: 16,
mapTypeId: google.maps.MapTypeId.ROADMAP
};
AgentConController.map = new google.maps.Map(AgentConController.getView().byId("map_canvas").getDomRef(), mapOptions);
} | conditional_block |
|
AgentConsole.controller.js | AccountName === 'Deloitte') {
AgentConController.map.panTo(new google.maps.LatLng(-33.862995, 151.207193));
AgentConController.map.setZoom(18);
marker = new google.maps.Marker({
// The below line is equivalent to writing:
// position: new google.maps.LatLng(-34.397, 150.644)
animation: google.maps.Animation.DROP,
position: {
lat: -33.862995,
lng: 151.207193
},
map: AgentConController.map
});
}
}
});
}
});
console.log("Selected language: " + reservation.task.attributes.selected_language);
console.log("-----");
var oModelExcepForCrate = new JSONModel({
"histories": {
"labels": ['March 2017', 'April 2017', 'May 2017', 'June 2017', 'July 2017', 'August 2017'],
"datasets": [{
"label": "Low",
"backgroundColor": '#ADD8E6',
"data": [10, 10, 9, 8, 5, 2]
}, {
"label": "Medium",
"backgroundColor": '#98FB98',
"data": [8, 16, 7, 5, 5, 7]
}, {
"label": "High",
"backgroundColor": '#FFA07A',
"data": [2, 3, 1, 5, 3, 5]
}]
},
options: {
scales: {
yAxes: [{
ticks: {
beginAtZero: true
}
}]
}
}
});
AgentConController.getOwnerComponent().setModel(oModelExcepForCrate, "custHistory");
AgentConController.getOwnerComponent().getModel("custHistory").refresh();
var data = {
openIssues: [{
TicketNo: "1001",
TicketDesc: "Device Damaged",
Status: "Open",
Priority: "1",
StartTime: "08-27-2017"
}, {
TicketNo: "1002",
TicketDesc: "Device making noise",
Status: "In Progress",
Priority: "2",
StartTime: "08-26-2017"
}, {
TicketNo: "1003",
TicketDesc: "Wrong Product Delivered",
Status: "Open",
Priority: "2",
StartTime: "08-26-2017"
}, {
TicketNo: "1004",
TicketDesc: "No Manual provided",
Status: "In Progress",
Priority: "3",
StartTime: "08-25-2017"
}, {
TicketNo: "1005",
TicketDesc: "Heating issue",
Status: "Open",
Priority: "1",
StartTime: "08-27-2017"
}]
};
AgentConController.getOwnerComponent().setModel(new JSONModel(data), "openIssuesModel");
AgentConController.getOwnerComponent().getModel("openIssuesModel").refresh();
});
worker.on("reservation.accepted", function(reservation) {
that.logger("Reservation " + reservation.sid + " accepted!");
//twiml.record({transcribe: true, maxLength: 30});
});
worker.on("reservation.rejected", function(reservation) {
that.logger("Reservation " + reservation.sid + " rejected!");
});
worker.on("reservation.timeout", function(reservation) {
that.logger("Reservation " + reservation.sid + " timed out!");
});
worker.on("reservation.canceled", function(reservation) {
that.logger("Reservation " + reservation.sid + " canceled!");
});
},
/* Hook up the agent Activity buttons to Worker.js */
bindAgentActivityButtons: function() {
// Fetch the full list of available Activities from TaskRouter. Store each
// ActivitySid against the matching Friendly Name
var activitySids = {};
worker.activities.fetch(function(error, activityList) {
var activities = activityList.data;
var i = activities.length;
while (i--) {
activitySids[activities[i].friendlyName] = activities[i].sid;
}
});
/* For each button of class 'change-activity' in our Agent UI, look up the
ActivitySid corresponding to the Friendly Name in the button’s next-activity
data attribute. Use Worker.js to transition the agent to that ActivitySid
when the button is clicked.*/
var elements = document.getElementsByClassName('change-activity');
var i = elements.length;
while (i--) {
elements[i].onclick = function() {
var nextActivity = this.dataset.nextActivity;
var nextActivitySid = activitySids[nextActivity];
worker.update("ActivitySid", nextActivitySid);
}
}
},
onSetBusy: function() {
worker.update("ActivitySid", "WA9dae4784b2317a747d2a71cc7a4a1466");
},
onSetIdle: function() {
worker.update("ActivitySid", "WA8f25e4d7b5024db83aef8843b30a670a");
},
onBase64: function() {
var reader = new FileReader();
reader.onload = function(readerEvt) {
var binaryString = readerEvt.target.result;
console.log(btoa(binaryString));
};
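// note: FileReader.readAsBinaryString expects a Blob/File object; passing a path string like
// the one below will not actually read the file - the path would first need to be fetched into a Blob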
reader.readAsBinaryString("model/sample.flac")
},
/* Update the UI to reflect a change in Activity */
agentActivityChanged: function(activity) {
this.hideAgentActivities();
this.showAgentActivity(activity);
},
hideAgentActivities: function() {
var elements = document.getElementsByClassName('agent-activity');
var i = elements.length;
while (i--) {
//elements[i].style.display = 'none';
}
},
showAgentActivity: function(activity) {
activity = activity.toLowerCase();
var elements = document.getElementsByClassName(('agent-activity ' + activity));
//elements.item(0).style.display = 'block';
},
/* Other stuff */
logger: function(message) {
// var log = document.getElementById('log');
// log.value += "\n> " + message;
// log.scrollTop = log.scrollHeight;
console.log(message);
},
/*End Twilio Functions*/
/**
* Similar to onAfterRendering, but this hook is invoked before the controller's View is re-rendered
* (NOT before the first rendering! onInit() is used for that one!).
* @memberOf com.deloitte.smartservice.SMARTSERVICE.view.AgentConsole
*/
// onBeforeRendering: function() {
//
// },
/**
* Called when the View has been rendered (so its HTML is part of the document). Post-rendering manipulations of the HTML could be done here.
* This hook is the same one that SAPUI5 controls get after being rendered.
* @memberOf com.deloitte.smartservice.SMARTSERVICE.view.AgentConsole
*/
// onAfterRendering: function() {
//
// },
/**
* Called when the Controller is destroyed. Use this one to free resources and finalize activities.
* @memberOf com.deloitte.smartservice.SMARTSERVICE.view.AgentConsole
*/
// onExit: function() {
//
// }
onPatternMatch: function(oEvent) {
if (oEvent.getParameter("name") === "AgentConsole") {
var today = new Date();
AgentConController.getView().byId("dr_fromTo").setSecondDateValue(today);
var oneWeekAgo = new Date();
oneWeekAgo.setDate(oneWeekAgo.getDate() - 7);
AgentConController.getView().byId("dr_fromTo").setDateValue(oneWeekAgo);
AgentConController.initialized = true;
AgentConController.getView().byId("map_canvas").setVisible(false);
AgentConController.getView().byId("map_canvas").setVisible(true);
AgentConController.getView().byId("map_canvas").onAfterRendering = function() {
AgentConController.geocoder = new google.maps.Geocoder();
window.mapOptions = {
center: new google.maps.LatLng(37.687878, -122.471780), | zoom: 16,
mapTypeId: google.maps.MapTypeId.ROADMAP
}; | random_line_split |
|
CS3243_P1_41_2.py |
def __lt__(self, other):
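# orders nodes by f = g + h; ties are broken in favour of the larger path cost g, then the larger (more recently generated) id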
if (self.current_cost + self.heuristic_value) == (other.current_cost + other.heuristic_value):
if (self.current_cost == other.current_cost):
return (self.id > other.id)
return (self.current_cost > other.current_cost)
return (self.current_cost + self.heuristic_value) < (other.current_cost + other.heuristic_value)
def __eq__(self, other):
return (self.current_cost + self.heuristic_value) == (other.current_cost + other.heuristic_value)
# set current path cost
def set_current_cost(self, cost):
self.current_cost = cost
# get current path cost
def get_current_cost(self):
return self.current_cost
# set heuristic value
def set_heuristic_value(self, value):
self.heuristic_value = value
# get heuristic value
def get_heuristic_value(self):
return self.heuristic_value
# set current depth
def set_current_depth(self, depth):
self.depth = depth
def set_previous_action(self, action):
self.previous_action = action
def get_previous_action(self):
return self.previous_action
def set_previous_node(self, node):
self.previous_node = node
def get_previous_node(self):
return self.previous_node
def set_id(self, next_id):
self.id = next_id
class Puzzle(object):
def __init__(self, init_state, goal_state):
# you may add more attributes if you think is useful
self.name = "Manhattan Distance"
self.size = len(init_state)
self.init_state = Node(init_state)
self.goal_state = Node(goal_state)
self.visited = dict()
self.actions = list()
self.inversions = 0
self.max_depth = 0 # max depth reached by tree/graph search
self.max_size = 0
self.nodes_expanded = 0 # number of nodes expanded
self.time_taken = 0 # time taken for the latest executed solve operation (in seconds)
def solve(self):
start = time()
if not self.check_for_solvability():
self.actions.append("UNSOLVABLE")
print "UNSOLVABLE"
return self.actions
'''
A* with h1: manhattan distance
'''
self.init_state.set_current_cost(0)
self.init_state.set_current_depth(0)
self.init_state.set_heuristic_value(self.get_heuristic_value(self.init_state))
frontier = []
contains = {} # hash table to keep track of the minimum cost to each node
next_id = 0;
heapq.heappush(frontier, self.init_state)
self.visited[self.tupify(self.init_state.state)] = 0;
self.max_size += 1
while frontier[0].get_heuristic_value() > 0:
curr = heapq.heappop(frontier)
self.nodes_expanded += 1;
self.visited[self.tupify(curr.state)] = curr.get_current_cost()
for i in self.generate_possibilities(curr): # generate possibilities will only return nodes that have not yet been visited
next_id += 1;
i.set_current_cost(curr.get_current_cost() + 1)
i.set_heuristic_value(self.get_heuristic_value(i))
i.set_id(next_id);
key = self.tupify(i.state)
if key not in contains or contains[key] > i.get_current_cost():
heapq.heappush(frontier, i)
contains[key] = i.get_current_cost()
if len(frontier) > self.max_size:
self.max_size = len(frontier)
backtrack = frontier[0]
while backtrack != None:
self.actions.append(backtrack.get_previous_action())
backtrack = backtrack.get_previous_node()
self.actions.reverse()
self.time_taken = time() - start
return self.actions
# returns Effective Branching Factor
def get_EFB(self):
return math.pow(self.nodes_expanded, 1/self.max_depth)
# returns number of nodes expanded by latest executed solve operation
def get_nodes_expanded(self):
return self.nodes_expanded
# checks for whether the puzzle provided is solvable
def check_for_solvability(self):
inversions = 0
row_with_blank = None
initial = []
for row in range(self.size):
for e in self.init_state.state[row]:
if e == 0:
row_with_blank = row
continue
initial.append(e)
for i in range(len(initial)):
curr = initial[i]
for j in range(i, len(initial)):
if initial[j] < initial[i]:
inversions += 1
self.inversions = inversions
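# e.g. the flattened tile list [2, 1, 3] contains exactly one inversion (2 appears before 1)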
if (self.size % 2):
return (inversions % 2 == 0)
else:
if (row_with_blank % 2):
return (inversions % 2 == 0)
else:
return (inversions % 2 == 1)
# calculates heuristic value for a given node (h1: manhattan distance)
def get_heuristic_value(self, node):
## h0: Hamming Distance (kept for posterity)
# count = 0
# for i in range(self.size):
# for j in range(self.size):
# if node.state[i][j] != self.goal_state.state[i][j]:
# count += 1
# return count
count = 0
for i in range(self.size):
for j in range(self.size):
curr = node.state[i][j]
if curr == 0:
continue
coord_in_goal = self.search_in_goal(curr);
count += (abs(coord_in_goal[0]-i) + abs(coord_in_goal[1]-j))
return count
def search_in_goal(self, query):
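# e.g. in a 3x3 puzzle, tile 5 belongs at ((5 - 1) // 3, (5 - 1) % 3) = (1, 1), the centre cell of the goal board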
return ((query - 1) // self.size, (query - 1) % self.size)
def generate_possibilities(self, node):
blank_x = -1
blank_y = -1
nodes = []
for y in range(self.size):
if blank_x != -1:
break
for x in range(self.size):
if node.state[y][x] == 0:
blank_x = x
blank_y = y
break
# move blank left => move tile right
if blank_x > 0:
temp = self.twodimensional_copy(node.state)
temp[blank_y][blank_x] = temp[blank_y][blank_x - 1]
temp[blank_y][blank_x - 1] = 0
if self.tupify(temp) not in self.visited:
temp_node = Node(temp)
temp_node.set_previous_node(node)
temp_node.set_previous_action(str(temp[blank_y][blank_x]) + " RIGHT")
nodes.append(temp_node)
# move blank right => move tile left
if blank_x < self.size - 1:
temp = self.twodimensional_copy(node.state)
temp[blank_y][blank_x] = temp[blank_y][blank_x + 1]
temp[blank_y][blank_x + 1] = 0
if self.tupify(temp) not in self.visited:
temp_node = Node(temp)
temp_node.set_previous_node(node)
temp_node.set_previous_action(str(temp[blank_y][blank_x]) + " LEFT")
nodes.append(temp_node)
# move blank down => move tile up
if blank_y < self.size - 1:
temp = self.twodimensional_copy(node.state)
temp[blank_y][blank_x] = temp[blank_y + 1][blank_x]
temp[blank_y + 1][blank_x] = 0
if self.tupify(temp) not in self.visited:
temp_node = Node(temp)
temp_node.set_previous_node(node)
temp_node.set_previous_action(str(temp[blank_y][blank_x]) + " UP")
nodes.append(temp_node)
# move blank up => move tile down
if blank_y > 0:
temp = self.twodimensional_copy(node.state)
temp[blank_y][blank_x] = temp[blank_y - 1][blank_x]
temp[blank_y - 1][blank_x] = 0
if self.tupify(temp) not in self.visited:
temp_node = Node(temp)
temp_node.set_previous_node(node)
temp_node.set_previous_action(str(temp[blank_y][blank_x]) + " DOWN")
nodes.append(temp_node)
return nodes
def tupify(self, stateList):
outer = []
for i in stateList:
outer.append(tuple(i))
return tuple(outer)
def twodimensional_copy(self, stateList):
return [i[:] for i in stateList]
# you may add more functions if you think is useful
if __name__ == "__main__":
# do NOT modify below
# argv[0] represents the name of the | self.state = orientation # a list of lists corresponding to the orientation of the tiles
self.current_cost = float('inf')
self.heuristic_value = float('inf')
self.previous_action = "START"
self.previous_node = None
self.id = 0 # id of the node inserted (later nodes have a higher id)
# stats
self.depth = float('inf') | identifier_body |
|
CS3243_P1_41_2.py | # stats
self.depth = float('inf')
def __lt__(self, other):
if (self.current_cost + self.heuristic_value) == (other.current_cost + other.heuristic_value):
if (self.current_cost == other.current_cost):
return (self.id > other.id)
return (self.current_cost > other.current_cost)
return (self.current_cost + self.heuristic_value) < (other.current_cost + other.heuristic_value)
def __eq__(self, other):
return (self.current_cost + self.heuristic_value) == (other.current_cost + other.heuristic_value)
# set current path cost
def set_current_cost(self, cost):
self.current_cost = cost
# get current path cost
def get_current_cost(self):
return self.current_cost
# set heuristic value
def set_heuristic_value(self, value):
self.heuristic_value = value
# get heuristic value
def get_heuristic_value(self):
return self.heuristic_value
# set current depth
def set_current_depth(self, depth):
self.depth = depth
def set_previous_action(self, action):
self.previous_action = action
def get_previous_action(self):
return self.previous_action
def set_previous_node(self, node):
self.previous_node = node
def | (self):
return self.previous_node
def set_id(self, next_id):
self.id = next_id
class Puzzle(object):
def __init__(self, init_state, goal_state):
# you may add more attributes if you think is useful
self.name = "Manhattan Distance"
self.size = len(init_state)
self.init_state = Node(init_state)
self.goal_state = Node(goal_state)
self.visited = dict()
self.actions = list()
self.inversions = 0
self.max_depth = 0 # max depth reached by tree/graph search
self.max_size = 0
self.nodes_expanded = 0 # number of nodes expanded
self.time_taken = 0 # time taken for the latest executed solve operation (in seconds)
def solve(self):
start = time()
if not self.check_for_solvability():
self.actions.append("UNSOLVABLE")
print "UNSOLVABLE"
return self.actions
'''
A* with h1: manhattan distance
'''
self.init_state.set_current_cost(0)
self.init_state.set_current_depth(0)
self.init_state.set_heuristic_value(self.get_heuristic_value(self.init_state))
frontier = []
contains = {} # hash table to keep track of the minimum cost to each node
next_id = 0;
heapq.heappush(frontier, self.init_state)
self.visited[self.tupify(self.init_state.state)] = 0;
self.max_size += 1
while frontier[0].get_heuristic_value() > 0:
curr = heapq.heappop(frontier)
self.nodes_expanded += 1;
self.visited[self.tupify(curr.state)] = curr.get_current_cost()
for i in self.generate_possibilities(curr): # generate possibilities will only return nodes that have not yet been visited
next_id += 1;
i.set_current_cost(curr.get_current_cost() + 1)
i.set_heuristic_value(self.get_heuristic_value(i))
i.set_id(next_id);
key = self.tupify(i.state)
if key not in contains or contains[key] > i.get_current_cost():
heapq.heappush(frontier, i)
contains[key] = i.get_current_cost()
if len(frontier) > self.max_size:
self.max_size = len(frontier)
backtrack = frontier[0]
while backtrack != None:
self.actions.append(backtrack.get_previous_action())
backtrack = backtrack.get_previous_node()
self.actions.reverse()
self.time_taken = time() - start
return self.actions
# returns Effective Branching Factor
def get_EFB(self):
return math.pow(self.nodes_expanded, 1/self.max_depth)
# returns number of nodes expanded by latest executed solve operation
def get_nodes_expanded(self):
return self.nodes_expanded
# checks for whether the puzzle provided is solvable
def check_for_solvability(self):
inversions = 0
row_with_blank = None
initial = []
for row in range(self.size):
for e in self.init_state.state[row]:
if e == 0:
row_with_blank = row
continue
initial.append(e)
for i in range(len(initial)):
curr = initial[i]
for j in range(i, len(initial)):
if initial[j] < initial[i]:
inversions += 1
self.inversions = inversions
if (self.size % 2):
return (inversions % 2 == 0)
else:
if (row_with_blank % 2):
return (inversions % 2 == 0)
else:
return (inversions % 2 == 1)
# calculates heuristic value for a given node (h1: manhattan distance)
def get_heuristic_value(self, node):
## h0: Hamming Distance (kept for posterity)
# count = 0
# for i in range(self.size):
# for j in range(self.size):
# if node.state[i][j] != self.goal_state.state[i][j]:
# count += 1
# return count
count = 0
for i in range(self.size):
for j in range(self.size):
curr = node.state[i][j]
if curr == 0:
continue
coord_in_goal = self.search_in_goal(curr);
count += (abs(coord_in_goal[0]-i) + abs(coord_in_goal[1]-j))
return count
def search_in_goal(self, query):
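# e.g. in a 3x3 puzzle, tile 7 belongs at ((7 - 1) // 3, (7 - 1) % 3) = (2, 0), the bottom-left cell of the goal board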
return ((query - 1) // self.size, (query - 1) % self.size)
def generate_possibilities(self, node):
blank_x = -1
blank_y = -1
nodes = []
for y in range(self.size):
if blank_x != -1:
break
for x in range(self.size):
if node.state[y][x] == 0:
blank_x = x
blank_y = y
break
# move blank left => move tile right
if blank_x > 0:
temp = self.twodimensional_copy(node.state)
temp[blank_y][blank_x] = temp[blank_y][blank_x - 1]
temp[blank_y][blank_x - 1] = 0
if self.tupify(temp) not in self.visited:
temp_node = Node(temp)
temp_node.set_previous_node(node)
temp_node.set_previous_action(str(temp[blank_y][blank_x]) + " RIGHT")
nodes.append(temp_node)
# move blank right => move tile left
if blank_x < self.size - 1:
temp = self.twodimensional_copy(node.state)
temp[blank_y][blank_x] = temp[blank_y][blank_x + 1]
temp[blank_y][blank_x + 1] = 0
if self.tupify(temp) not in self.visited:
temp_node = Node(temp)
temp_node.set_previous_node(node)
temp_node.set_previous_action(str(temp[blank_y][blank_x]) + " LEFT")
nodes.append(temp_node)
# move blank down => move tile up
if blank_y < self.size - 1:
temp = self.twodimensional_copy(node.state)
temp[blank_y][blank_x] = temp[blank_y + 1][blank_x]
temp[blank_y + 1][blank_x] = 0
if self.tupify(temp) not in self.visited:
temp_node = Node(temp)
temp_node.set_previous_node(node)
temp_node.set_previous_action(str(temp[blank_y][blank_x]) + " UP")
nodes.append(temp_node)
# move blank up => move tile down
if blank_y > 0:
temp = self.twodimensional_copy(node.state)
temp[blank_y][blank_x] = temp[blank_y - 1][blank_x]
temp[blank_y - 1][blank_x] = 0
if self.tupify(temp) not in self.visited:
temp_node = Node(temp)
temp_node.set_previous_node(node)
temp_node.set_previous_action(str(temp[blank_y][blank_x]) + " DOWN")
nodes.append(temp_node)
return nodes
def tupify(self, stateList):
outer = []
for i in stateList:
outer.append(tuple(i))
return tuple(outer)
def twodimensional_copy(self, stateList):
return [i[:] for i in stateList]
# you may add more functions if you think it is useful
if __name__ == "__main__":
# do NOT modify below
# argv[0] represents the name of the file that is being executed
# argv[1] represents name of input file
# argv[2] represents name of destination output file
if len(sys.argv) != 3:
raise ValueError("Wrong number of arguments!")
try:
f = open(sys.argv[1], 'r')
except IOError:
| get_previous_node | identifier_name |
CS3243_P1_41_2.py | )
# stats
self.depth = float('inf')
def __lt__(self, other):
if (self.current_cost + self.heuristic_value) == (other.current_cost + other.heuristic_value):
if (self.current_cost == other.current_cost):
return (self.id > other.id)
return (self.current_cost > other.current_cost)
return (self.current_cost + self.heuristic_value) < (other.current_cost + other.heuristic_value)
def __eq__(self, other):
return (self.current_cost + self.heuristic_value) == (other.current_cost + other.heuristic_value)
# set current path cost
def set_current_cost(self, cost):
self.current_cost = cost
# get current path cost
def get_current_cost(self):
return self.current_cost
# set heuristic value
def set_heuristic_value(self, value):
self.heuristic_value = value
# get heuristic value
def get_heuristic_value(self):
return self.heuristic_value
# set current depth
def set_current_depth(self, depth):
self.depth = depth
def set_previous_action(self, action):
self.previous_action = action
def get_previous_action(self):
return self.previous_action
def set_previous_node(self, node):
self.previous_node = node
def get_previous_node(self):
return self.previous_node
def set_id(self, next_id):
self.id = next_id
class Puzzle(object):
def __init__(self, init_state, goal_state):
# you may add more attributes if you think it is useful
self.name = "Manhattan Distance"
self.size = len(init_state)
self.init_state = Node(init_state)
self.goal_state = Node(goal_state)
self.visited = dict()
self.actions = list()
self.inversions = 0
self.max_depth = 0 # max depth reached by tree/graph search
self.max_size = 0
self.nodes_expanded = 0 # number of nodes expanded
self.time_taken = 0 # time taken for the latest executed solve operation (in seconds)
def solve(self):
start = time()
if not self.check_for_solvability():
self.actions.append("UNSOLVABLE")
print "UNSOLVABLE"
return self.actions
'''
A* with h1: manhattan distance
'''
self.init_state.set_current_cost(0)
self.init_state.set_current_depth(0)
self.init_state.set_heuristic_value(self.get_heuristic_value(self.init_state))
frontier = []
contains = {} # hash table to keep track of the minimum cost to each node
next_id = 0;
heapq.heappush(frontier, self.init_state)
self.visited[self.tupify(self.init_state.state)] = 0;
self.max_size += 1
while frontier[0].get_heuristic_value() > 0:
curr = heapq.heappop(frontier)
self.nodes_expanded += 1;
self.visited[self.tupify(curr.state)] = curr.get_current_cost()
for i in self.generate_possibilities(curr): # generate possibilities will only return nodes that have not yet been visited
next_id += 1;
i.set_current_cost(curr.get_current_cost() + 1)
i.set_heuristic_value(self.get_heuristic_value(i))
i.set_id(next_id);
key = self.tupify(i.state)
if key not in contains or contains[key] > i.get_current_cost():
heapq.heappush(frontier, i)
contains[key] = i.get_current_cost()
if len(frontier) > self.max_size:
self.max_size = len(frontier)
backtrack = frontier[0]
while backtrack != None:
self.actions.append(backtrack.get_previous_action())
backtrack = backtrack.get_previous_node()
self.actions.reverse()
self.time_taken = time() - start
return self.actions
# returns Effective Branching Factor
def get_EFB(self):
return math.pow(self.nodes_expanded, 1/self.max_depth)
# returns number of nodes expanded by latest executed solve operation
def get_nodes_expanded(self):
return self.nodes_expanded
# checks for whether the puzzle provided is solvable
def check_for_solvability(self):
inversions = 0
row_with_blank = None
initial = []
for row in range(self.size):
for e in self.init_state.state[row]:
if e == 0:
row_with_blank = row
continue
initial.append(e)
for i in range(len(initial)):
curr = initial[i]
for j in range(i, len(initial)):
if initial[j] < initial[i]:
inversions += 1
self.inversions = inversions
if (self.size % 2):
return (inversions % 2 == 0)
else:
if (row_with_blank % 2):
return (inversions % 2 == 0)
else:
return (inversions % 2 == 1)
# calculates heuristic value for a given node (h1: manhattan distance)
def get_heuristic_value(self, node):
## h0: Hamming Distance (kept for posterity)
# count = 0
# for i in range(self.size):
# for j in range(self.size):
# if node.state[i][j] != self.goal_state.state[i][j]:
# count += 1
# return count
count = 0 | coord_in_goal = self.search_in_goal(curr);
count += (abs(coord_in_goal[0]-i) + abs(coord_in_goal[1]-j))
return count
def search_in_goal(self, query):
return ((query - 1) // self.size, (query - 1) % self.size)
def generate_possibilities(self, node):
blank_x = -1
blank_y = -1
nodes = []
for y in range(self.size):
if blank_x != -1:
break
for x in range(self.size):
if node.state[y][x] == 0:
blank_x = x
blank_y = y
break
# move blank left => move tile right
if blank_x > 0:
temp = self.twodimensional_copy(node.state)
temp[blank_y][blank_x] = temp[blank_y][blank_x - 1]
temp[blank_y][blank_x - 1] = 0
if self.tupify(temp) not in self.visited:
temp_node = Node(temp)
temp_node.set_previous_node(node)
temp_node.set_previous_action(str(temp[blank_y][blank_x]) + " RIGHT")
nodes.append(temp_node)
# move blank right => move tile left
if blank_x < self.size - 1:
temp = self.twodimensional_copy(node.state)
temp[blank_y][blank_x] = temp[blank_y][blank_x + 1]
temp[blank_y][blank_x + 1] = 0
if self.tupify(temp) not in self.visited:
temp_node = Node(temp)
temp_node.set_previous_node(node)
temp_node.set_previous_action(str(temp[blank_y][blank_x]) + " LEFT")
nodes.append(temp_node)
# move blank down => move tile up
if blank_y < self.size - 1:
temp = self.twodimensional_copy(node.state)
temp[blank_y][blank_x] = temp[blank_y + 1][blank_x]
temp[blank_y + 1][blank_x] = 0
if self.tupify(temp) not in self.visited:
temp_node = Node(temp)
temp_node.set_previous_node(node)
temp_node.set_previous_action(str(temp[blank_y][blank_x]) + " UP")
nodes.append(temp_node)
# move blank up => move tile down
if blank_y > 0:
temp = self.twodimensional_copy(node.state)
temp[blank_y][blank_x] = temp[blank_y - 1][blank_x]
temp[blank_y - 1][blank_x] = 0
if self.tupify(temp) not in self.visited:
temp_node = Node(temp)
temp_node.set_previous_node(node)
temp_node.set_previous_action(str(temp[blank_y][blank_x]) + " DOWN")
nodes.append(temp_node)
return nodes
def tupify(self, stateList):
outer = []
for i in stateList:
outer.append(tuple(i))
return tuple(outer)
def twodimensional_copy(self, stateList):
return [i[:] for i in stateList]
# you may add more functions if you think it is useful
if __name__ == "__main__":
# do NOT modify below
# argv[0] represents the name of the file that is being executed
# argv[1] represents name of input file
# argv[2] represents name of destination output file
if len(sys.argv) != 3:
raise ValueError("Wrong number of arguments!")
try:
f = open(sys.argv[1], 'r')
except IOError:
raise IOError | for i in range(self.size):
for j in range(self.size):
curr = node.state[i][j]
if curr == 0:
continue | random_line_split |
CS3243_P1_41_2.py | return (self.current_cost + self.heuristic_value) == (other.current_cost + other.heuristic_value)
# set current path cost
def set_current_cost(self, cost):
self.current_cost = cost
# get current path cost
def get_current_cost(self):
return self.current_cost
# set heuristic value
def set_heuristic_value(self, value):
self.heuristic_value = value
# get heuristic value
def get_heuristic_value(self):
return self.heuristic_value
# set current depth
def set_current_depth(self, depth):
self.depth = depth
def set_previous_action(self, action):
self.previous_action = action
def get_previous_action(self):
return self.previous_action
def set_previous_node(self, node):
self.previous_node = node
def get_previous_node(self):
return self.previous_node
def set_id(self, next_id):
self.id = next_id
class Puzzle(object):
def __init__(self, init_state, goal_state):
# you may add more attributes if you think it is useful
self.name = "Manhattan Distance"
self.size = len(init_state)
self.init_state = Node(init_state)
self.goal_state = Node(goal_state)
self.visited = dict()
self.actions = list()
self.inversions = 0
self.max_depth = 0 # max depth reached by tree/graph search
self.max_size = 0
self.nodes_expanded = 0 # number of nodes expanded
self.time_taken = 0 # time taken for the latest executed solve operation (in seconds)
def solve(self):
start = time()
if not self.check_for_solvability():
self.actions.append("UNSOLVABLE")
print "UNSOLVABLE"
return self.actions
'''
A* with h1: manhattan distance
'''
self.init_state.set_current_cost(0)
self.init_state.set_current_depth(0)
self.init_state.set_heuristic_value(self.get_heuristic_value(self.init_state))
frontier = []
contains = {} # hash table to keep track of the minimum cost to each node
next_id = 0;
heapq.heappush(frontier, self.init_state)
self.visited[self.tupify(self.init_state.state)] = 0;
self.max_size += 1
while frontier[0].get_heuristic_value() > 0:
curr = heapq.heappop(frontier)
self.nodes_expanded += 1;
self.visited[self.tupify(curr.state)] = curr.get_current_cost()
for i in self.generate_possibilities(curr): # generate possibilities will only return nodes that have not yet been visited
next_id += 1;
i.set_current_cost(curr.get_current_cost() + 1)
i.set_heuristic_value(self.get_heuristic_value(i))
i.set_id(next_id);
key = self.tupify(i.state)
if key not in contains or contains[key] > i.get_current_cost():
heapq.heappush(frontier, i)
contains[key] = i.get_current_cost()
if len(frontier) > self.max_size:
self.max_size = len(frontier)
backtrack = frontier[0]
while backtrack != None:
self.actions.append(backtrack.get_previous_action())
backtrack = backtrack.get_previous_node()
self.actions.reverse()
self.time_taken = time() - start
return self.actions
# returns Effective Branching Factor
def get_EFB(self):
return math.pow(self.nodes_expanded, 1/self.max_depth)
# returns number of nodes expanded by latest executed solve operation
def get_nodes_expanded(self):
return self.nodes_expanded
# checks for whether the puzzle provided is solvable
def check_for_solvability(self):
inversions = 0
row_with_blank = None
initial = []
for row in range(self.size):
for e in self.init_state.state[row]:
if e == 0:
row_with_blank = row
continue
initial.append(e)
for i in range(len(initial)):
curr = initial[i]
for j in range(i, len(initial)):
if initial[j] < initial[i]:
inversions += 1
self.inversions = inversions
if (self.size % 2):
return (inversions % 2 == 0)
else:
if (row_with_blank % 2):
return (inversions % 2 == 0)
else:
return (inversions % 2 == 1)
# calculates heuristic value for a given node (h1: manhattan distance)
def get_heuristic_value(self, node):
## h0: Hamming Distance (kept for posterity)
# count = 0
# for i in range(self.size):
# for j in range(self.size):
# if node.state[i][j] != self.goal_state.state[i][j]:
# count += 1
# return count
count = 0
for i in range(self.size):
for j in range(self.size):
curr = node.state[i][j]
if curr == 0:
continue
coord_in_goal = self.search_in_goal(curr);
count += (abs(coord_in_goal[0]-i) + abs(coord_in_goal[1]-j))
return count
def search_in_goal(self, query):
return ((query - 1) // self.size, (query - 1) % self.size)
def generate_possibilities(self, node):
blank_x = -1
blank_y = -1
nodes = []
for y in range(self.size):
if blank_x != -1:
break
for x in range(self.size):
if node.state[y][x] == 0:
blank_x = x
blank_y = y
break
# move blank left => move tile right
if blank_x > 0:
temp = self.twodimensional_copy(node.state)
temp[blank_y][blank_x] = temp[blank_y][blank_x - 1]
temp[blank_y][blank_x - 1] = 0
if self.tupify(temp) not in self.visited:
temp_node = Node(temp)
temp_node.set_previous_node(node)
temp_node.set_previous_action(str(temp[blank_y][blank_x]) + " RIGHT")
nodes.append(temp_node)
# move blank right => move tile left
if blank_x < self.size - 1:
temp = self.twodimensional_copy(node.state)
temp[blank_y][blank_x] = temp[blank_y][blank_x + 1]
temp[blank_y][blank_x + 1] = 0
if self.tupify(temp) not in self.visited:
temp_node = Node(temp)
temp_node.set_previous_node(node)
temp_node.set_previous_action(str(temp[blank_y][blank_x]) + " LEFT")
nodes.append(temp_node)
# move blank down => move tile up
if blank_y < self.size - 1:
temp = self.twodimensional_copy(node.state)
temp[blank_y][blank_x] = temp[blank_y + 1][blank_x]
temp[blank_y + 1][blank_x] = 0
if self.tupify(temp) not in self.visited:
temp_node = Node(temp)
temp_node.set_previous_node(node)
temp_node.set_previous_action(str(temp[blank_y][blank_x]) + " UP")
nodes.append(temp_node)
# move blank up => move tile down
if blank_y > 0:
temp = self.twodimensional_copy(node.state)
temp[blank_y][blank_x] = temp[blank_y - 1][blank_x]
temp[blank_y - 1][blank_x] = 0
if self.tupify(temp) not in self.visited:
temp_node = Node(temp)
temp_node.set_previous_node(node)
temp_node.set_previous_action(str(temp[blank_y][blank_x]) + " DOWN")
nodes.append(temp_node)
return nodes
def tupify(self, stateList):
outer = []
for i in stateList:
outer.append(tuple(i))
return tuple(outer)
def twodimensional_copy(self, stateList):
return [i[:] for i in stateList]
# you may add more functions if you think it is useful
if __name__ == "__main__":
# do NOT modify below
# argv[0] represents the name of the file that is being executed
# argv[1] represents name of input file
# argv[2] represents name of destination output file
| if len(sys.argv) != 3:
raise ValueError("Wrong number of arguments!")
try:
f = open(sys.argv[1], 'r')
except IOError:
raise IOError("Input file not found!")
lines = f.readlines()
# n = num rows in input file
n = len(lines)
# max_num = n to the power of 2 - 1
max_num = n ** 2 - 1
# Instantiate a 2D list of size n x n
init_state = [[0 for i in range(n)] for j in range(n)]
goal_state = [[0 for i in range(n)] for j in range(n)]
| conditional_block |
|
handlers_events.go | return err
}
logger.Infof(strings.Repeat("-", 80))
logger.Infof("Registration Open from block %v to %v", event.DkgStarts, event.RegistrationEnds)
logger.Infof(strings.Repeat("-", 80))
logger.Infof(" publicKey: %v", dkgtasks.FormatBigIntSlice(public[:]))
logger.Infof(strings.Repeat("-", 80))
// Record the schedule
schedule := &EthDKGSchedule{}
schedule.RegistrationStart = log.BlockNumber
schedule.RegistrationEnd = event.RegistrationEnds.Uint64()
schedule.ShareDistributionStart = schedule.RegistrationEnd + 1
schedule.ShareDistributionEnd = event.ShareDistributionEnds.Uint64()
schedule.DisputeStart = schedule.ShareDistributionEnd + 1
schedule.DisputeEnd = event.DisputeEnds.Uint64()
schedule.KeyShareSubmissionStart = schedule.DisputeEnd + 1
schedule.KeyShareSubmissionEnd = event.KeyShareSubmissionEnds.Uint64()
schedule.MPKSubmissionStart = schedule.KeyShareSubmissionEnd + 1
schedule.MPKSubmissionEnd = event.MpkSubmissionEnds.Uint64()
schedule.GPKJSubmissionStart = schedule.MPKSubmissionEnd + 1
schedule.GPKJSubmissionEnd = event.GpkjSubmissionEnds.Uint64()
schedule.GPKJGroupAccusationStart = schedule.GPKJSubmissionEnd + 1
schedule.GPKJGroupAccusationEnd = event.GpkjDisputeEnds.Uint64()
schedule.CompleteStart = schedule.GPKJGroupAccusationEnd + 1
schedule.CompleteEnd = event.DkgComplete.Uint64()
// TODO associate names with these also to help with debugging/logging
ib := make(map[uint64]func(*State, uint64) error)
ib[schedule.ShareDistributionStart] = svcs.DoDistributeShares // Do ShareDistribution
ib[schedule.DisputeStart] = svcs.DoSubmitDispute // Do Disputes
ib[schedule.KeyShareSubmissionStart] = svcs.DoSubmitKeyShare // Do KeyShareSubmission
ib[schedule.MPKSubmissionStart] = svcs.DoSubmitMasterPublicKey // Do MPKSubmission
ib[schedule.GPKJSubmissionStart] = svcs.DoSubmitGPKj // Do GPKJSubmission
ib[schedule.GPKJGroupAccusationStart] = svcs.DoGroupAccusationGPKj // Do GPKJDisputes
ib[schedule.CompleteStart] = svcs.DoSuccessfulCompletion // Do SuccessfulCompletion
logger.Infof("Adding block processors for %v", ib)
state.interestingBlocks = ib
acct := eth.GetDefaultAccount()
state.ethdkg = NewEthDKGState()
state.ethdkg.Address = acct.Address
state.ethdkg.Schedule = schedule
state.ethdkg.TransportPrivateKey = private
state.ethdkg.TransportPublicKey = public
taskLogger := logging.GetLogger("rt")
task := dkgtasks.NewRegisterTask(taskLogger, eth, acct,
state.ethdkg.TransportPublicKey,
state.ethdkg.Schedule.RegistrationEnd)
state.ethdkg.RegistrationTH = svcs.taskMan.NewTaskHandler(eth.Timeout(), eth.RetryDelay(), task)
state.ethdkg.RegistrationTH.Start()
} else {
logger.Infof("Not participating in DKG... registration ends at height %v but height %v is finalized.",
event.RegistrationEnds, state.HighestBlockFinalized)
// state.ethdkg = EthDKGState{} // TODO I need to cancel any TaskHandlers too
}
return nil
}
// ProcessShareDistribution accumulates everyone's shares ETHDKG
func (svcs *Services) ProcessShareDistribution(state *State, log types.Log) error {
logger := svcs.logger
logger.Info(strings.Repeat("-", 60))
logger.Info("ProcessShareDistribution()")
logger.Info(strings.Repeat("-", 60))
if !ETHDKGInProgress(state.ethdkg, log.BlockNumber) {
logger.Warn("Ignoring share distribution since we are not participating this round...")
return ErrCanNotContinue
}
eth := svcs.eth
c := eth.Contracts()
ethdkg := state.ethdkg
event, err := c.Ethdkg.ParseShareDistribution(log)
if err != nil {
return err
}
ethdkg.Commitments[event.Issuer] = event.Commitments
ethdkg.EncryptedShares[event.Issuer] = event.EncryptedShares
return nil
}
// ProcessKeyShareSubmission ETHDKG
func (svcs *Services) ProcessKeyShareSubmission(state *State, log types.Log) error {
logger := svcs.logger
logger.Info(strings.Repeat("-", 60))
logger.Info("ProcessKeyShareSubmission()")
logger.Info(strings.Repeat("-", 60))
if !ETHDKGInProgress(state.ethdkg, log.BlockNumber) {
logger.Warn("Ignoring key share submission since we are not participating this round...")
return ErrCanNotContinue
}
eth := svcs.eth
c := eth.Contracts()
event, err := c.Ethdkg.ParseKeyShareSubmission(log)
if err != nil {
return err
}
addr := event.Issuer
keyshareG1 := event.KeyShareG1
keyshareG1Proof := event.KeyShareG1CorrectnessProof
keyshareG2 := event.KeyShareG2
logger.Infof("keyshareG1:%v keyshareG2:%v", dkgtasks.FormatBigIntSlice(keyshareG1[:]), dkgtasks.FormatBigIntSlice(keyshareG2[:]))
state.ethdkg.KeyShareG1s[addr] = keyshareG1
state.ethdkg.KeyShareG1CorrectnessProofs[addr] = keyshareG1Proof
state.ethdkg.KeyShareG2s[addr] = keyshareG2
return nil
}
// ProcessValidatorSet handles receiving validatorSet changes
func (svcs *Services) ProcessValidatorSet(state *State, log types.Log) error {
eth := svcs.eth
c := eth.Contracts()
updatedState := state
event, err := c.Ethdkg.ParseValidatorSet(log)
if err != nil {
return err
}
epoch := uint32(event.Epoch.Int64())
vs := state.ValidatorSets[epoch]
vs.NotBeforeMadNetHeight = event.MadHeight
vs.ValidatorCount = event.ValidatorCount
vs.GroupKey[0] = *event.GroupKey0
vs.GroupKey[1] = *event.GroupKey1
vs.GroupKey[2] = *event.GroupKey2
vs.GroupKey[3] = *event.GroupKey3
updatedState.ValidatorSets[epoch] = vs
err = svcs.checkValidatorSet(updatedState, epoch)
if err != nil {
return err
}
return nil
}
// ProcessValidatorMember handles receiving keys for a specific validator
func (svcs *Services) ProcessValidatorMember(state *State, log types.Log) error {
eth := svcs.eth
c := eth.Contracts()
event, err := c.Ethdkg.ParseValidatorMember(log)
if err != nil {
return err
}
epoch := uint32(event.Epoch.Int64())
index := uint8(event.Index.Uint64()) - 1
v := Validator{
Account: event.Account,
Index: index,
SharedKey: [4]big.Int{*event.Share0, *event.Share1, *event.Share2, *event.Share3},
}
if len(state.Validators) < int(index+1) {
newValList := make([]Validator, int(index+1))
copy(newValList, state.Validators[epoch])
state.Validators[epoch] = newValList
}
state.Validators[epoch][index] = v
ptrGroupShare := [4]*big.Int{
&v.SharedKey[0], &v.SharedKey[1],
&v.SharedKey[2], &v.SharedKey[3]}
groupShare, err := bn256.MarshalG2Big(ptrGroupShare)
if err != nil {
svcs.logger.Errorf("Failed to marshal groupShare: %v", err)
return err
}
svcs.logger.Debugf("Validator member %v %x", v.Index, groupShare)
err = svcs.checkValidatorSet(state, epoch)
if err != nil {
return err
}
return nil
}
func (svcs *Services) checkValidatorSet(state *State, epoch uint32) error {
logger := svcs.logger
// Make sure we've received a validator set event
validatorSet, present := state.ValidatorSets[ | {
eth := svcs.eth
c := eth.Contracts()
logger := svcs.logger
event, err := c.Ethdkg.ParseRegistrationOpen(log)
if err != nil {
return err
}
if ETHDKGInProgress(state.ethdkg, log.BlockNumber) {
logger.Warnf("Received RegistrationOpen event while ETHDKG in progress. Aborting old round.")
AbortETHDKG(state.ethdkg)
}
if event.RegistrationEnds.Uint64() > state.HighestBlockFinalized {
private, public, err := dkg.GenerateKeys()
if err != nil { | identifier_body |
|
handlers_events.go | := dkgtasks.NewRegisterTask(taskLogger, eth, acct,
state.ethdkg.TransportPublicKey,
state.ethdkg.Schedule.RegistrationEnd)
state.ethdkg.RegistrationTH = svcs.taskMan.NewTaskHandler(eth.Timeout(), eth.RetryDelay(), task)
state.ethdkg.RegistrationTH.Start()
} else {
logger.Infof("Not participating in DKG... registration ends at height %v but height %v is finalized.",
event.RegistrationEnds, state.HighestBlockFinalized)
// state.ethdkg = EthDKGState{} // TODO I need to cancel any TaskHandlers too
}
return nil
}
// ProcessShareDistribution accumulates everyone's shares ETHDKG
func (svcs *Services) ProcessShareDistribution(state *State, log types.Log) error {
logger := svcs.logger
logger.Info(strings.Repeat("-", 60))
logger.Info("ProcessShareDistribution()")
logger.Info(strings.Repeat("-", 60))
if !ETHDKGInProgress(state.ethdkg, log.BlockNumber) {
logger.Warn("Ignoring share distribution since we are not participating this round...")
return ErrCanNotContinue
}
eth := svcs.eth
c := eth.Contracts()
ethdkg := state.ethdkg
event, err := c.Ethdkg.ParseShareDistribution(log)
if err != nil {
return err
}
ethdkg.Commitments[event.Issuer] = event.Commitments
ethdkg.EncryptedShares[event.Issuer] = event.EncryptedShares
return nil
}
// ProcessKeyShareSubmission ETHDKG
func (svcs *Services) ProcessKeyShareSubmission(state *State, log types.Log) error {
logger := svcs.logger
logger.Info(strings.Repeat("-", 60))
logger.Info("ProcessKeyShareSubmission()")
logger.Info(strings.Repeat("-", 60))
if !ETHDKGInProgress(state.ethdkg, log.BlockNumber) {
logger.Warn("Ignoring key share submission since we are not participating this round...")
return ErrCanNotContinue
}
eth := svcs.eth
c := eth.Contracts()
event, err := c.Ethdkg.ParseKeyShareSubmission(log)
if err != nil {
return err
}
addr := event.Issuer
keyshareG1 := event.KeyShareG1
keyshareG1Proof := event.KeyShareG1CorrectnessProof
keyshareG2 := event.KeyShareG2
logger.Infof("keyshareG1:%v keyshareG2:%v", dkgtasks.FormatBigIntSlice(keyshareG1[:]), dkgtasks.FormatBigIntSlice(keyshareG2[:]))
state.ethdkg.KeyShareG1s[addr] = keyshareG1
state.ethdkg.KeyShareG1CorrectnessProofs[addr] = keyshareG1Proof
state.ethdkg.KeyShareG2s[addr] = keyshareG2
return nil
}
// ProcessValidatorSet handles receiving validatorSet changes
func (svcs *Services) ProcessValidatorSet(state *State, log types.Log) error {
eth := svcs.eth
c := eth.Contracts()
updatedState := state
event, err := c.Ethdkg.ParseValidatorSet(log)
if err != nil {
return err
}
epoch := uint32(event.Epoch.Int64())
vs := state.ValidatorSets[epoch]
vs.NotBeforeMadNetHeight = event.MadHeight
vs.ValidatorCount = event.ValidatorCount
vs.GroupKey[0] = *event.GroupKey0
vs.GroupKey[1] = *event.GroupKey1
vs.GroupKey[2] = *event.GroupKey2
vs.GroupKey[3] = *event.GroupKey3
updatedState.ValidatorSets[epoch] = vs
err = svcs.checkValidatorSet(updatedState, epoch)
if err != nil {
return err
}
return nil
}
// ProcessValidatorMember handles receiving keys for a specific validator
func (svcs *Services) ProcessValidatorMember(state *State, log types.Log) error {
eth := svcs.eth
c := eth.Contracts()
event, err := c.Ethdkg.ParseValidatorMember(log)
if err != nil {
return err
}
epoch := uint32(event.Epoch.Int64())
index := uint8(event.Index.Uint64()) - 1
v := Validator{
Account: event.Account,
Index: index,
SharedKey: [4]big.Int{*event.Share0, *event.Share1, *event.Share2, *event.Share3},
}
if len(state.Validators) < int(index+1) {
newValList := make([]Validator, int(index+1))
copy(newValList, state.Validators[epoch])
state.Validators[epoch] = newValList
}
state.Validators[epoch][index] = v
ptrGroupShare := [4]*big.Int{
&v.SharedKey[0], &v.SharedKey[1],
&v.SharedKey[2], &v.SharedKey[3]}
groupShare, err := bn256.MarshalG2Big(ptrGroupShare)
if err != nil {
svcs.logger.Errorf("Failed to marshal groupShare: %v", err)
return err
}
svcs.logger.Debugf("Validator member %v %x", v.Index, groupShare)
err = svcs.checkValidatorSet(state, epoch)
if err != nil {
return err
}
return nil
}
func (svcs *Services) checkValidatorSet(state *State, epoch uint32) error {
logger := svcs.logger
// Make sure we've received a validator set event
validatorSet, present := state.ValidatorSets[epoch]
if !present {
logger.Warnf("No validator set received for epoch %v", epoch)
}
// Make sure we've received a validator member event
validators, present := state.Validators[epoch]
if !present {
logger.Warnf("No validators received for epoch %v", epoch)
}
// See how many validator members we've seen and how many we expect
receivedCount := len(validators)
expectedCount := int(validatorSet.ValidatorCount)
// Log validator set status
logLevel := logrus.WarnLevel
if receivedCount == expectedCount && expectedCount > 0 {
logLevel = logrus.InfoLevel
}
logger.Logf(logLevel, "Epoch: %v NotBeforeMadNetHeight: %v Validators Received: %v of %v", epoch, validatorSet.NotBeforeMadNetHeight, receivedCount, expectedCount)
if receivedCount == expectedCount {
// Start by building the ValidatorSet
ptrGroupKey := [4]*big.Int{&validatorSet.GroupKey[0], &validatorSet.GroupKey[1], &validatorSet.GroupKey[2], &validatorSet.GroupKey[3]}
groupKey, err := bn256.MarshalG2Big(ptrGroupKey)
if err != nil {
logger.Errorf("Failed to marshal groupKey: %v", err)
return err
}
vs := &objs.ValidatorSet{
GroupKey: groupKey,
Validators: make([]*objs.Validator, validatorSet.ValidatorCount),
NotBefore: validatorSet.NotBeforeMadNetHeight}
// Loop over the Validators
for _, validator := range validators {
ptrGroupShare := [4]*big.Int{
&validator.SharedKey[0], &validator.SharedKey[1],
&validator.SharedKey[2], &validator.SharedKey[3]}
groupShare, err := bn256.MarshalG2Big(ptrGroupShare)
if err != nil {
logger.Errorf("Failed to marshal groupShare: %v", err)
return err
}
v := &objs.Validator{
VAddr: validator.Account.Bytes(),
GroupShare: groupShare}
vs.Validators[validator.Index] = v
logger.Infof("ValidatorMember[%v]: {GroupShare: 0x%x, VAddr: %x}", validator.Index, groupShare, v.VAddr)
}
logger.Infof("ValidatorSet: {GroupKey: 0x%x, NotBefore: %v, Validators: %v }", vs.GroupKey, vs.NotBefore, vs.Validators)
err = svcs.ah.AddValidatorSet(vs)
if err != nil {
logger.Errorf("Unable to add validator set: %v", err) // TODO handle -- MUST retry or consensus shuts down
}
}
return nil
}
// ProcessDepositReceived handles logic around receiving a deposit event
func (svcs *Services) ProcessDepositReceived(state *State, log types.Log) error {
eth := svcs.eth
c := eth.Contracts()
logger := svcs.logger
event, err := c.Deposit.ParseDepositReceived(log)
if err != nil {
return err
}
logger.Infof("deposit depositID:%x ethereum:0x%x amount:%d",
event.DepositID, event.Depositor, event.Amount)
return svcs.consensusDb.Update(func(txn *badger.Txn) error {
depositNonce := event.DepositID.Bytes()
account := event.Depositor.Bytes()
owner := &aobjs.Owner{}
err := owner.New(account, constants.CurveSecp256k1)
if err != nil | {
logger.Debugf("Error in Services.ProcessDepositReceived at owner.New: %v", err)
return err
} | conditional_block |
|
handlers_events.go | &EthDKGSchedule{}
schedule.RegistrationStart = log.BlockNumber
schedule.RegistrationEnd = event.RegistrationEnds.Uint64()
schedule.ShareDistributionStart = schedule.RegistrationEnd + 1
schedule.ShareDistributionEnd = event.ShareDistributionEnds.Uint64()
schedule.DisputeStart = schedule.ShareDistributionEnd + 1
schedule.DisputeEnd = event.DisputeEnds.Uint64()
schedule.KeyShareSubmissionStart = schedule.DisputeEnd + 1
schedule.KeyShareSubmissionEnd = event.KeyShareSubmissionEnds.Uint64()
schedule.MPKSubmissionStart = schedule.KeyShareSubmissionEnd + 1
schedule.MPKSubmissionEnd = event.MpkSubmissionEnds.Uint64()
schedule.GPKJSubmissionStart = schedule.MPKSubmissionEnd + 1
schedule.GPKJSubmissionEnd = event.GpkjSubmissionEnds.Uint64()
schedule.GPKJGroupAccusationStart = schedule.GPKJSubmissionEnd + 1
schedule.GPKJGroupAccusationEnd = event.GpkjDisputeEnds.Uint64()
schedule.CompleteStart = schedule.GPKJGroupAccusationEnd + 1
schedule.CompleteEnd = event.DkgComplete.Uint64()
// TODO associate names with these also to help with debugging/logging
ib := make(map[uint64]func(*State, uint64) error)
ib[schedule.ShareDistributionStart] = svcs.DoDistributeShares // Do ShareDistribution
ib[schedule.DisputeStart] = svcs.DoSubmitDispute // Do Disputes
ib[schedule.KeyShareSubmissionStart] = svcs.DoSubmitKeyShare // Do KeyShareSubmission
ib[schedule.MPKSubmissionStart] = svcs.DoSubmitMasterPublicKey // Do MPKSubmission
ib[schedule.GPKJSubmissionStart] = svcs.DoSubmitGPKj // Do GPKJSubmission
ib[schedule.GPKJGroupAccusationStart] = svcs.DoGroupAccusationGPKj // Do GPKJDisputes
ib[schedule.CompleteStart] = svcs.DoSuccessfulCompletion // Do SuccessfulCompletion
logger.Infof("Adding block processors for %v", ib)
state.interestingBlocks = ib
acct := eth.GetDefaultAccount()
state.ethdkg = NewEthDKGState()
state.ethdkg.Address = acct.Address
state.ethdkg.Schedule = schedule
state.ethdkg.TransportPrivateKey = private
state.ethdkg.TransportPublicKey = public
taskLogger := logging.GetLogger("rt")
task := dkgtasks.NewRegisterTask(taskLogger, eth, acct,
state.ethdkg.TransportPublicKey,
state.ethdkg.Schedule.RegistrationEnd)
state.ethdkg.RegistrationTH = svcs.taskMan.NewTaskHandler(eth.Timeout(), eth.RetryDelay(), task)
state.ethdkg.RegistrationTH.Start()
} else {
logger.Infof("Not participating in DKG... registration ends at height %v but height %v is finalized.",
event.RegistrationEnds, state.HighestBlockFinalized)
// state.ethdkg = EthDKGState{} // TODO I need to cancel any TaskHandlers too
}
return nil
}
// ProcessShareDistribution accumulates everyone's shares ETHDKG
func (svcs *Services) ProcessShareDistribution(state *State, log types.Log) error {
logger := svcs.logger
logger.Info(strings.Repeat("-", 60))
logger.Info("ProcessShareDistribution()")
logger.Info(strings.Repeat("-", 60))
if !ETHDKGInProgress(state.ethdkg, log.BlockNumber) {
logger.Warn("Ignoring share distribution since we are not participating this round...")
return ErrCanNotContinue
}
eth := svcs.eth
c := eth.Contracts()
ethdkg := state.ethdkg
event, err := c.Ethdkg.ParseShareDistribution(log)
if err != nil {
return err
}
ethdkg.Commitments[event.Issuer] = event.Commitments
ethdkg.EncryptedShares[event.Issuer] = event.EncryptedShares
return nil
}
// ProcessKeyShareSubmission ETHDKG
func (svcs *Services) ProcessKeyShareSubmission(state *State, log types.Log) error {
logger := svcs.logger
logger.Info(strings.Repeat("-", 60))
logger.Info("ProcessKeyShareSubmission()")
logger.Info(strings.Repeat("-", 60))
if !ETHDKGInProgress(state.ethdkg, log.BlockNumber) {
logger.Warn("Ignoring key share submission since we are not participating this round...")
return ErrCanNotContinue
}
eth := svcs.eth
c := eth.Contracts()
event, err := c.Ethdkg.ParseKeyShareSubmission(log)
if err != nil {
return err
}
addr := event.Issuer
keyshareG1 := event.KeyShareG1
keyshareG1Proof := event.KeyShareG1CorrectnessProof
keyshareG2 := event.KeyShareG2
logger.Infof("keyshareG1:%v keyshareG2:%v", dkgtasks.FormatBigIntSlice(keyshareG1[:]), dkgtasks.FormatBigIntSlice(keyshareG2[:]))
state.ethdkg.KeyShareG1s[addr] = keyshareG1
state.ethdkg.KeyShareG1CorrectnessProofs[addr] = keyshareG1Proof
state.ethdkg.KeyShareG2s[addr] = keyshareG2
return nil
}
// ProcessValidatorSet handles receiving validatorSet changes
func (svcs *Services) ProcessValidatorSet(state *State, log types.Log) error {
eth := svcs.eth
c := eth.Contracts()
updatedState := state
event, err := c.Ethdkg.ParseValidatorSet(log)
if err != nil {
return err
}
epoch := uint32(event.Epoch.Int64())
vs := state.ValidatorSets[epoch]
vs.NotBeforeMadNetHeight = event.MadHeight
vs.ValidatorCount = event.ValidatorCount
vs.GroupKey[0] = *event.GroupKey0
vs.GroupKey[1] = *event.GroupKey1
vs.GroupKey[2] = *event.GroupKey2
vs.GroupKey[3] = *event.GroupKey3
updatedState.ValidatorSets[epoch] = vs
err = svcs.checkValidatorSet(updatedState, epoch)
if err != nil {
return err
}
return nil
}
// ProcessValidatorMember handles receiving keys for a specific validator
func (svcs *Services) | (state *State, log types.Log) error {
eth := svcs.eth
c := eth.Contracts()
event, err := c.Ethdkg.ParseValidatorMember(log)
if err != nil {
return err
}
epoch := uint32(event.Epoch.Int64())
index := uint8(event.Index.Uint64()) - 1
v := Validator{
Account: event.Account,
Index: index,
SharedKey: [4]big.Int{*event.Share0, *event.Share1, *event.Share2, *event.Share3},
}
if len(state.Validators) < int(index+1) {
newValList := make([]Validator, int(index+1))
copy(newValList, state.Validators[epoch])
state.Validators[epoch] = newValList
}
state.Validators[epoch][index] = v
ptrGroupShare := [4]*big.Int{
&v.SharedKey[0], &v.SharedKey[1],
&v.SharedKey[2], &v.SharedKey[3]}
groupShare, err := bn256.MarshalG2Big(ptrGroupShare)
if err != nil {
svcs.logger.Errorf("Failed to marshal groupShare: %v", err)
return err
}
svcs.logger.Debugf("Validator member %v %x", v.Index, groupShare)
err = svcs.checkValidatorSet(state, epoch)
if err != nil {
return err
}
return nil
}
func (svcs *Services) checkValidatorSet(state *State, epoch uint32) error {
logger := svcs.logger
// Make sure we've received a validator set event
validatorSet, present := state.ValidatorSets[epoch]
if !present {
logger.Warnf("No validator set received for epoch %v", epoch)
}
// Make sure we've received a validator member event
validators, present := state.Validators[epoch]
if !present {
logger.Warnf("No validators received for epoch %v", epoch)
}
// See how many validator members we've seen and how many we expect
receivedCount := len(validators)
expectedCount := int(validatorSet.ValidatorCount)
// Log validator set status
logLevel := logrus.WarnLevel
if receivedCount == expectedCount && expectedCount > 0 {
logLevel = logrus.InfoLevel
}
logger.Logf(logLevel, "Epoch: %v NotBeforeMadNetHeight: %v Validators Received: %v of %v", epoch, validatorSet.NotBeforeMadNetHeight, receivedCount, expectedCount)
if receivedCount == expectedCount {
// Start by building the ValidatorSet
ptrGroupKey := [4]*big.Int{&validatorSet.GroupKey[0], &validatorSet | ProcessValidatorMember | identifier_name |
handlers_events.go | &EthDKGSchedule{}
schedule.RegistrationStart = log.BlockNumber
schedule.RegistrationEnd = event.RegistrationEnds.Uint64()
schedule.ShareDistributionStart = schedule.RegistrationEnd + 1
schedule.ShareDistributionEnd = event.ShareDistributionEnds.Uint64()
schedule.DisputeStart = schedule.ShareDistributionEnd + 1
schedule.DisputeEnd = event.DisputeEnds.Uint64()
schedule.KeyShareSubmissionStart = schedule.DisputeEnd + 1
schedule.KeyShareSubmissionEnd = event.KeyShareSubmissionEnds.Uint64()
schedule.MPKSubmissionStart = schedule.KeyShareSubmissionEnd + 1
schedule.MPKSubmissionEnd = event.MpkSubmissionEnds.Uint64()
schedule.GPKJSubmissionStart = schedule.MPKSubmissionEnd + 1
schedule.GPKJSubmissionEnd = event.GpkjSubmissionEnds.Uint64()
schedule.GPKJGroupAccusationStart = schedule.GPKJSubmissionEnd + 1
schedule.GPKJGroupAccusationEnd = event.GpkjDisputeEnds.Uint64()
schedule.CompleteStart = schedule.GPKJGroupAccusationEnd + 1
schedule.CompleteEnd = event.DkgComplete.Uint64()
// TODO associate names with these also to help with debugging/logging
ib := make(map[uint64]func(*State, uint64) error)
ib[schedule.ShareDistributionStart] = svcs.DoDistributeShares // Do ShareDistribution
ib[schedule.DisputeStart] = svcs.DoSubmitDispute // Do Disputes
ib[schedule.KeyShareSubmissionStart] = svcs.DoSubmitKeyShare // Do KeyShareSubmission
ib[schedule.MPKSubmissionStart] = svcs.DoSubmitMasterPublicKey // Do MPKSubmission
ib[schedule.GPKJSubmissionStart] = svcs.DoSubmitGPKj // Do GPKJSubmission
ib[schedule.GPKJGroupAccusationStart] = svcs.DoGroupAccusationGPKj // Do GPKJDisputes
ib[schedule.CompleteStart] = svcs.DoSuccessfulCompletion // Do SuccessfulCompletion
logger.Infof("Adding block processors for %v", ib)
state.interestingBlocks = ib
acct := eth.GetDefaultAccount()
state.ethdkg = NewEthDKGState()
state.ethdkg.Address = acct.Address
state.ethdkg.Schedule = schedule
state.ethdkg.TransportPrivateKey = private
state.ethdkg.TransportPublicKey = public
taskLogger := logging.GetLogger("rt")
task := dkgtasks.NewRegisterTask(taskLogger, eth, acct,
state.ethdkg.TransportPublicKey,
state.ethdkg.Schedule.RegistrationEnd)
state.ethdkg.RegistrationTH = svcs.taskMan.NewTaskHandler(eth.Timeout(), eth.RetryDelay(), task)
state.ethdkg.RegistrationTH.Start()
} else {
logger.Infof("Not participating in DKG... registration ends at height %v but height %v is finalized.",
event.RegistrationEnds, state.HighestBlockFinalized)
// state.ethdkg = EthDKGState{} // TODO I need to cancel any TaskHandlers too
}
return nil
}
// ProcessShareDistribution accumulates everyone's shares ETHDKG
func (svcs *Services) ProcessShareDistribution(state *State, log types.Log) error {
logger := svcs.logger
logger.Info(strings.Repeat("-", 60))
logger.Info("ProcessShareDistribution()")
logger.Info(strings.Repeat("-", 60))
if !ETHDKGInProgress(state.ethdkg, log.BlockNumber) {
logger.Warn("Ignoring share distribution since we are not participating this round...")
return ErrCanNotContinue
}
eth := svcs.eth
c := eth.Contracts()
ethdkg := state.ethdkg
event, err := c.Ethdkg.ParseShareDistribution(log)
if err != nil {
return err
}
ethdkg.Commitments[event.Issuer] = event.Commitments
ethdkg.EncryptedShares[event.Issuer] = event.EncryptedShares
return nil
}
// ProcessKeyShareSubmission ETHDKG
func (svcs *Services) ProcessKeyShareSubmission(state *State, log types.Log) error {
logger := svcs.logger
logger.Info(strings.Repeat("-", 60))
logger.Info("ProcessKeyShareSubmission()")
logger.Info(strings.Repeat("-", 60))
if !ETHDKGInProgress(state.ethdkg, log.BlockNumber) {
logger.Warn("Ignoring key share submission since we are not participating this round...")
return ErrCanNotContinue
}
eth := svcs.eth
c := eth.Contracts()
event, err := c.Ethdkg.ParseKeyShareSubmission(log)
if err != nil {
return err
}
addr := event.Issuer
keyshareG1 := event.KeyShareG1
keyshareG1Proof := event.KeyShareG1CorrectnessProof
keyshareG2 := event.KeyShareG2
logger.Infof("keyshareG1:%v keyshareG2:%v", dkgtasks.FormatBigIntSlice(keyshareG1[:]), dkgtasks.FormatBigIntSlice(keyshareG2[:]))
state.ethdkg.KeyShareG1s[addr] = keyshareG1
state.ethdkg.KeyShareG1CorrectnessProofs[addr] = keyshareG1Proof
state.ethdkg.KeyShareG2s[addr] = keyshareG2
return nil
}
// ProcessValidatorSet handles receiving validatorSet changes
func (svcs *Services) ProcessValidatorSet(state *State, log types.Log) error {
|
updatedState := state
event, err := c.Ethdkg.ParseValidatorSet(log)
if err != nil {
return err
}
epoch := uint32(event.Epoch.Int64())
vs := state.ValidatorSets[epoch]
vs.NotBeforeMadNetHeight = event.MadHeight
vs.ValidatorCount = event.ValidatorCount
vs.GroupKey[0] = *event.GroupKey0
vs.GroupKey[1] = *event.GroupKey1
vs.GroupKey[2] = *event.GroupKey2
vs.GroupKey[3] = *event.GroupKey3
updatedState.ValidatorSets[epoch] = vs
err = svcs.checkValidatorSet(updatedState, epoch)
if err != nil {
return err
}
return nil
}
// ProcessValidatorMember handles receiving keys for a specific validator
func (svcs *Services) ProcessValidatorMember(state *State, log types.Log) error {
eth := svcs.eth
c := eth.Contracts()
event, err := c.Ethdkg.ParseValidatorMember(log)
if err != nil {
return err
}
epoch := uint32(event.Epoch.Int64())
index := uint8(event.Index.Uint64()) - 1
v := Validator{
Account: event.Account,
Index: index,
SharedKey: [4]big.Int{*event.Share0, *event.Share1, *event.Share2, *event.Share3},
}
if len(state.Validators) < int(index+1) {
newValList := make([]Validator, int(index+1))
copy(newValList, state.Validators[epoch])
state.Validators[epoch] = newValList
}
state.Validators[epoch][index] = v
ptrGroupShare := [4]*big.Int{
&v.SharedKey[0], &v.SharedKey[1],
&v.SharedKey[2], &v.SharedKey[3]}
groupShare, err := bn256.MarshalG2Big(ptrGroupShare)
if err != nil {
svcs.logger.Errorf("Failed to marshal groupShare: %v", err)
return err
}
svcs.logger.Debugf("Validator member %v %x", v.Index, groupShare)
err = svcs.checkValidatorSet(state, epoch)
if err != nil {
return err
}
return nil
}
func (svcs *Services) checkValidatorSet(state *State, epoch uint32) error {
logger := svcs.logger
// Make sure we've received a validator set event
validatorSet, present := state.ValidatorSets[epoch]
if !present {
logger.Warnf("No validator set received for epoch %v", epoch)
}
// Make sure we've received a validator member event
validators, present := state.Validators[epoch]
if !present {
logger.Warnf("No validators received for epoch %v", epoch)
}
// See how many validator members we've seen and how many we expect
receivedCount := len(validators)
expectedCount := int(validatorSet.ValidatorCount)
// Log validator set status
logLevel := logrus.WarnLevel
if receivedCount == expectedCount && expectedCount > 0 {
logLevel = logrus.InfoLevel
}
logger.Logf(logLevel, "Epoch: %v NotBeforeMadNetHeight: %v Validators Received: %v of %v", epoch, validatorSet.NotBeforeMadNetHeight, receivedCount, expectedCount)
if receivedCount == expectedCount {
// Start by building the ValidatorSet
ptrGroupKey := [4]*big.Int{&validatorSet.GroupKey[0], &validatorSet | eth := svcs.eth
c := eth.Contracts() | random_line_split |
migrations.rs | DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
Pallet::<T>::on_chain_storage_version() == 13,
"v13 not applied"
);
frame_support::ensure!(
!StorageVersion::<T>::exists(),
"Storage version not migrated correctly"
);
Ok(())
}
}
}
pub mod v12 {
use super::*;
use frame_support::{pallet_prelude::ValueQuery, storage_alias};
#[storage_alias]
type HistoryDepth<T: Config> = StorageValue<Pallet<T>, u32, ValueQuery>;
/// Clean up `HistoryDepth` from storage.
///
/// We will be depending on the configurable value of `HistoryDepth` post
/// this release.
pub struct MigrateToV12<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV12<T> {
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0,
"Expected v11 before upgrading to v12"
);
if HistoryDepth::<T>::exists() {
frame_support::ensure!(
T::HistoryDepth::get() == HistoryDepth::<T>::get(),
"Provided value of HistoryDepth should be same as the existing storage value"
);
} else {
log::info!("No HistoryDepth in storage; nothing to remove");
}
Ok(Default::default())
}
fn on_runtime_upgrade() -> frame_support::weights::Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0 {
HistoryDepth::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V12_0_0);
log!(info, "v12 applied successfully");
T::DbWeight::get().reads_writes(1, 2)
} else {
log!(warn, "Skipping v12, should be removed");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V12_0_0,
"v12 not applied"
);
Ok(())
}
}
}
pub mod v11 { | traits::{GetStorageVersion, PalletInfoAccess},
};
#[cfg(feature = "try-runtime")]
use sp_io::hashing::twox_128;
pub struct MigrateToV11<T, P, N>(sp_std::marker::PhantomData<(T, P, N)>);
impl<T: Config, P: GetStorageVersion + PalletInfoAccess, N: Get<&'static str>> OnRuntimeUpgrade
for MigrateToV11<T, P, N>
{
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V10_0_0,
"must upgrade linearly"
);
let old_pallet_prefix = twox_128(N::get().as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&old_pallet_prefix).is_some(),
"no data for the old pallet name has been detected"
);
Ok(Default::default())
}
/// Migrate the entire storage of this pallet to a new prefix.
///
/// This new prefix must be the same as the one set in construct_runtime. For safety, use
/// `PalletInfo` to get it, as:
/// `<Runtime as frame_system::Config>::PalletInfo::name::<VoterBagsList>`.
///
/// The migration will look into the storage version in order to avoid triggering a
/// migration on an up to date storage.
fn on_runtime_upgrade() -> Weight {
let old_pallet_name = N::get();
let new_pallet_name = <P as PalletInfoAccess>::name();
if StorageVersion::<T>::get() == ObsoleteReleases::V10_0_0 {
// bump version anyway, even if we don't need to move the prefix
StorageVersion::<T>::put(ObsoleteReleases::V11_0_0);
if new_pallet_name == old_pallet_name {
log!(
warn,
"new bags-list name is equal to the old one, only bumping the version"
);
return T::DbWeight::get().reads(1).saturating_add(T::DbWeight::get().writes(1))
}
move_pallet(old_pallet_name.as_bytes(), new_pallet_name.as_bytes());
<T as frame_system::Config>::BlockWeights::get().max_block
} else {
log!(warn, "v11::migrate should be removed.");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0,
"wrong version after the upgrade"
);
let old_pallet_name = N::get();
let new_pallet_name = <P as PalletInfoAccess>::name();
// skip storage prefix checks for the same pallet names
if new_pallet_name == old_pallet_name {
return Ok(())
}
let old_pallet_prefix = twox_128(N::get().as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&old_pallet_prefix).is_none(),
"old pallet data hasn't been removed"
);
let new_pallet_name = <P as PalletInfoAccess>::name();
let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&new_pallet_prefix).is_some(),
"new pallet data hasn't been created"
);
Ok(())
}
}
}
pub mod v10 {
use super::*;
use frame_support::storage_alias;
#[storage_alias]
type EarliestUnappliedSlash<T: Config> = StorageValue<Pallet<T>, EraIndex>;
/// Apply any pending slashes that were queued.
///
/// That means we might slash someone a bit too early, but we definitely
/// won't forget to slash them. The cap of 512 is somewhat randomly taken to
/// prevent us from iterating over an arbitrarily large number of keys `on_runtime_upgrade`.
pub struct MigrateToV10<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV10<T> {
fn on_runtime_upgrade() -> frame_support::weights::Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V9_0_0 {
let pending_slashes = UnappliedSlashes::<T>::iter().take(512);
for (era, slashes) in pending_slashes {
for slash in slashes {
// in the old slashing scheme, the slash era was the key at which we read
// from `UnappliedSlashes`.
log!(warn, "prematurely applying a slash ({:?}) for era {:?}", slash, era);
slashing::apply_slash::<T>(slash, era);
}
}
EarliestUnappliedSlash::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V10_0_0);
log!(info, "MigrateToV10 executed successfully");
T::DbWeight::get().reads_writes(1, 1)
} else {
log!(warn, "MigrateToV10 should be removed.");
T::DbWeight::get().reads(1)
}
}
}
}
pub mod v9 {
use super::*;
#[cfg(feature = "try-runtime")]
use codec::{Decode, Encode};
#[cfg(feature = "try-runtime")]
use sp_std::vec::Vec;
/// Migration implementation that injects all validators into sorted list.
///
/// This is only useful for chains that started their `VoterList` just based on nominators.
pub struct InjectValidatorsIntoVoterList<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for InjectValidatorsIntoVoterList<T> {
fn on_runtime_upgrade() -> Weight {
| use super::*;
use frame_support::{
storage::migration::move_pallet, | random_line_split |
migrations.rs | DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
Pallet::<T>::on_chain_storage_version() == 13,
"v13 not applied"
);
frame_support::ensure!(
!StorageVersion::<T>::exists(),
"Storage version not migrated correctly"
);
Ok(())
}
}
}
pub mod v12 {
use super::*;
use frame_support::{pallet_prelude::ValueQuery, storage_alias};
#[storage_alias]
type HistoryDepth<T: Config> = StorageValue<Pallet<T>, u32, ValueQuery>;
/// Clean up `HistoryDepth` from storage.
///
/// We will be depending on the configurable value of `HistoryDepth` post
/// this release.
pub struct | <T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV12<T> {
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0,
"Expected v11 before upgrading to v12"
);
if HistoryDepth::<T>::exists() {
frame_support::ensure!(
T::HistoryDepth::get() == HistoryDepth::<T>::get(),
"Provided value of HistoryDepth should be same as the existing storage value"
);
} else {
log::info!("No HistoryDepth in storage; nothing to remove");
}
Ok(Default::default())
}
fn on_runtime_upgrade() -> frame_support::weights::Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0 {
HistoryDepth::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V12_0_0);
log!(info, "v12 applied successfully");
T::DbWeight::get().reads_writes(1, 2)
} else {
log!(warn, "Skipping v12, should be removed");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V12_0_0,
"v12 not applied"
);
Ok(())
}
}
}
pub mod v11 {
use super::*;
use frame_support::{
storage::migration::move_pallet,
traits::{GetStorageVersion, PalletInfoAccess},
};
#[cfg(feature = "try-runtime")]
use sp_io::hashing::twox_128;
pub struct MigrateToV11<T, P, N>(sp_std::marker::PhantomData<(T, P, N)>);
impl<T: Config, P: GetStorageVersion + PalletInfoAccess, N: Get<&'static str>> OnRuntimeUpgrade
for MigrateToV11<T, P, N>
{
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V10_0_0,
"must upgrade linearly"
);
let old_pallet_prefix = twox_128(N::get().as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&old_pallet_prefix).is_some(),
"no data for the old pallet name has been detected"
);
Ok(Default::default())
}
/// Migrate the entire storage of this pallet to a new prefix.
///
/// This new prefix must be the same as the one set in construct_runtime. For safety, use
/// `PalletInfo` to get it, as:
/// `<Runtime as frame_system::Config>::PalletInfo::name::<VoterBagsList>`.
///
/// The migration will look into the storage version in order to avoid triggering a
/// migration on an up to date storage.
fn on_runtime_upgrade() -> Weight {
let old_pallet_name = N::get();
let new_pallet_name = <P as PalletInfoAccess>::name();
if StorageVersion::<T>::get() == ObsoleteReleases::V10_0_0 {
// bump version anyway, even if we don't need to move the prefix
StorageVersion::<T>::put(ObsoleteReleases::V11_0_0);
if new_pallet_name == old_pallet_name {
log!(
warn,
"new bags-list name is equal to the old one, only bumping the version"
);
return T::DbWeight::get().reads(1).saturating_add(T::DbWeight::get().writes(1))
}
move_pallet(old_pallet_name.as_bytes(), new_pallet_name.as_bytes());
<T as frame_system::Config>::BlockWeights::get().max_block
} else {
log!(warn, "v11::migrate should be removed.");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0,
"wrong version after the upgrade"
);
let old_pallet_name = N::get();
let new_pallet_name = <P as PalletInfoAccess>::name();
// skip storage prefix checks for the same pallet names
if new_pallet_name == old_pallet_name {
return Ok(())
}
let old_pallet_prefix = twox_128(N::get().as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&old_pallet_prefix).is_none(),
"old pallet data hasn't been removed"
);
let new_pallet_name = <P as PalletInfoAccess>::name();
let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&new_pallet_prefix).is_some(),
"new pallet data hasn't been created"
);
Ok(())
}
}
}
pub mod v10 {
use super::*;
use frame_support::storage_alias;
#[storage_alias]
type EarliestUnappliedSlash<T: Config> = StorageValue<Pallet<T>, EraIndex>;
/// Apply any pending slashes that were queued.
///
/// That means we might slash someone a bit too early, but we definitely
/// won't forget to slash them. The cap of 512 is somewhat randomly taken to
/// prevent us from iterating over an arbitrarily large number of keys `on_runtime_upgrade`.
pub struct MigrateToV10<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV10<T> {
fn on_runtime_upgrade() -> frame_support::weights::Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V9_0_0 {
let pending_slashes = UnappliedSlashes::<T>::iter().take(512);
for (era, slashes) in pending_slashes {
for slash in slashes {
// in the old slashing scheme, the slash era was the key at which we read
// from `UnappliedSlashes`.
log!(warn, "prematurely applying a slash ({:?}) for era {:?}", slash, era);
slashing::apply_slash::<T>(slash, era);
}
}
EarliestUnappliedSlash::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V10_0_0);
log!(info, "MigrateToV10 executed successfully");
T::DbWeight::get().reads_writes(1, 1)
} else {
log!(warn, "MigrateToV10 should be removed.");
T::DbWeight::get().reads(1)
}
}
}
}
pub mod v9 {
use super::*;
#[cfg(feature = "try-runtime")]
use codec::{Decode, Encode};
#[cfg(feature = "try-runtime")]
use sp_std::vec::Vec;
/// Migration implementation that injects all validators into sorted list.
///
/// This is only useful for chains that started their `VoterList` just based on nominators.
pub struct InjectValidatorsIntoVoterList<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for InjectValidatorsIntoVoterList<T> {
fn on_runtime_upgrade() -> Weight | MigrateToV12 | identifier_name |
migrations.rs | >(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV12<T> {
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0,
"Expected v11 before upgrading to v12"
);
if HistoryDepth::<T>::exists() {
frame_support::ensure!(
T::HistoryDepth::get() == HistoryDepth::<T>::get(),
"Provided value of HistoryDepth should be same as the existing storage value"
);
} else {
log::info!("No HistoryDepth in storage; nothing to remove");
}
Ok(Default::default())
}
fn on_runtime_upgrade() -> frame_support::weights::Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0 {
HistoryDepth::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V12_0_0);
log!(info, "v12 applied successfully");
T::DbWeight::get().reads_writes(1, 2)
} else {
log!(warn, "Skipping v12, should be removed");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V12_0_0,
"v12 not applied"
);
Ok(())
}
}
}
pub mod v11 {
use super::*;
use frame_support::{
storage::migration::move_pallet,
traits::{GetStorageVersion, PalletInfoAccess},
};
#[cfg(feature = "try-runtime")]
use sp_io::hashing::twox_128;
pub struct MigrateToV11<T, P, N>(sp_std::marker::PhantomData<(T, P, N)>);
impl<T: Config, P: GetStorageVersion + PalletInfoAccess, N: Get<&'static str>> OnRuntimeUpgrade
for MigrateToV11<T, P, N>
{
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V10_0_0,
"must upgrade linearly"
);
let old_pallet_prefix = twox_128(N::get().as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&old_pallet_prefix).is_some(),
"no data for the old pallet name has been detected"
);
Ok(Default::default())
}
/// Migrate the entire storage of this pallet to a new prefix.
///
/// This new prefix must be the same as the one set in construct_runtime. For safety, use
/// `PalletInfo` to get it, as:
/// `<Runtime as frame_system::Config>::PalletInfo::name::<VoterBagsList>`.
///
/// The migration will look into the storage version in order to avoid triggering a
/// migration on already up-to-date storage.
fn on_runtime_upgrade() -> Weight {
let old_pallet_name = N::get();
let new_pallet_name = <P as PalletInfoAccess>::name();
if StorageVersion::<T>::get() == ObsoleteReleases::V10_0_0 {
// bump version anyway, even if we don't need to move the prefix
StorageVersion::<T>::put(ObsoleteReleases::V11_0_0);
if new_pallet_name == old_pallet_name {
log!(
warn,
"new bags-list name is equal to the old one, only bumping the version"
);
return T::DbWeight::get().reads(1).saturating_add(T::DbWeight::get().writes(1))
}
move_pallet(old_pallet_name.as_bytes(), new_pallet_name.as_bytes());
<T as frame_system::Config>::BlockWeights::get().max_block
} else {
log!(warn, "v11::migrate should be removed.");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0,
"wrong version after the upgrade"
);
let old_pallet_name = N::get();
let new_pallet_name = <P as PalletInfoAccess>::name();
// skip storage prefix checks for the same pallet names
if new_pallet_name == old_pallet_name {
return Ok(())
}
let old_pallet_prefix = twox_128(N::get().as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&old_pallet_prefix).is_none(),
"old pallet data hasn't been removed"
);
let new_pallet_name = <P as PalletInfoAccess>::name();
let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&new_pallet_prefix).is_some(),
"new pallet data hasn't been created"
);
Ok(())
}
}
}
pub mod v10 {
use super::*;
use frame_support::storage_alias;
#[storage_alias]
type EarliestUnappliedSlash<T: Config> = StorageValue<Pallet<T>, EraIndex>;
/// Apply any pending slashes that were queued.
///
/// That means we might slash someone a bit too early, but we definitely
/// won't forget to slash them. The cap of 512 is somewhat arbitrary; it is there to
/// prevent us from iterating over an arbitrarily large number of keys in `on_runtime_upgrade`.
pub struct MigrateToV10<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV10<T> {
fn on_runtime_upgrade() -> frame_support::weights::Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V9_0_0 {
let pending_slashes = UnappliedSlashes::<T>::iter().take(512);
for (era, slashes) in pending_slashes {
for slash in slashes {
// in the old slashing scheme, the slash era was the key at which we read
// from `UnappliedSlashes`.
log!(warn, "prematurely applying a slash ({:?}) for era {:?}", slash, era);
slashing::apply_slash::<T>(slash, era);
}
}
EarliestUnappliedSlash::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V10_0_0);
log!(info, "MigrateToV10 executed successfully");
T::DbWeight::get().reads_writes(1, 1)
} else {
log!(warn, "MigrateToV10 should be removed.");
T::DbWeight::get().reads(1)
}
}
}
}
pub mod v9 {
use super::*;
#[cfg(feature = "try-runtime")]
use codec::{Decode, Encode};
#[cfg(feature = "try-runtime")]
use sp_std::vec::Vec;
/// Migration implementation that injects all validators into sorted list.
///
/// This is only useful for chains that started their `VoterList` just based on nominators.
pub struct InjectValidatorsIntoVoterList<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for InjectValidatorsIntoVoterList<T> {
fn on_runtime_upgrade() -> Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V8_0_0 | {
let prev_count = T::VoterList::count();
let weight_of_cached = Pallet::<T>::weight_of_fn();
for (v, _) in Validators::<T>::iter() {
let weight = weight_of_cached(&v);
let _ = T::VoterList::on_insert(v.clone(), weight).map_err(|err| {
log!(warn, "failed to insert {:?} into VoterList: {:?}", v, err)
});
}
log!(
info,
"injected a total of {} new voters, prev count: {} next count: {}, updating to version 9",
Validators::<T>::count(),
prev_count,
T::VoterList::count(),
);
StorageVersion::<T>::put(ObsoleteReleases::V9_0_0);
T::BlockWeights::get().max_block | conditional_block |
|
migrations.rs |
fn on_runtime_upgrade() -> Weight {
let current = Pallet::<T>::current_storage_version();
let onchain = StorageVersion::<T>::get();
if current == 13 && onchain == ObsoleteReleases::V12_0_0 {
StorageVersion::<T>::kill();
current.put::<Pallet<T>>();
log!(info, "v13 applied successfully");
T::DbWeight::get().reads_writes(1, 2)
} else {
log!(warn, "Skipping v13, should be removed");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
Pallet::<T>::on_chain_storage_version() == 13,
"v13 not applied"
);
frame_support::ensure!(
!StorageVersion::<T>::exists(),
"Storage version not migrated correctly"
);
Ok(())
}
}
}
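// Illustrative sketch only: `current_storage_version()` above reads the version declared
// on the pallet definition, which lives outside this module and is assumed to look
// roughly like:
//
//     const STORAGE_VERSION: frame_support::traits::StorageVersion =
//         frame_support::traits::StorageVersion::new(13);
//
//     #[pallet::pallet]
//     #[pallet::storage_version(STORAGE_VERSION)]
//     pub struct Pallet<T>(_);
//
// v13 therefore retires the pallet-local `ObsoleteReleases` item in favour of the
// framework-managed on-chain storage version.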
pub mod v12 {
use super::*;
use frame_support::{pallet_prelude::ValueQuery, storage_alias};
#[storage_alias]
type HistoryDepth<T: Config> = StorageValue<Pallet<T>, u32, ValueQuery>;
/// Clean up `HistoryDepth` from storage.
///
/// We will be depending on the configurable value of `HistoryDepth` post
/// this release.
pub struct MigrateToV12<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV12<T> {
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0,
"Expected v11 before upgrading to v12"
);
if HistoryDepth::<T>::exists() {
frame_support::ensure!(
T::HistoryDepth::get() == HistoryDepth::<T>::get(),
"Provided value of HistoryDepth should be same as the existing storage value"
);
} else {
log::info!("No HistoryDepth in storage; nothing to remove");
}
Ok(Default::default())
}
fn on_runtime_upgrade() -> frame_support::weights::Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0 {
HistoryDepth::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V12_0_0);
log!(info, "v12 applied successfully");
T::DbWeight::get().reads_writes(1, 2)
} else {
log!(warn, "Skipping v12, should be removed");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V12_0_0,
"v12 not applied"
);
Ok(())
}
}
}
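// Illustrative sketch only: after v12 the history depth comes from the pallet `Config`
// instead of storage. A runtime would supply it as a constant, for example (the value
// 84 is an assumption):
//
//     impl pallet_staking::Config for Runtime {
//         type HistoryDepth = frame_support::traits::ConstU32<84>;
//         // ...
//     }
//
// The `pre_upgrade` check above only asserts that any existing storage value agrees
// with this constant before the storage item is killed.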
pub mod v11 {
use super::*;
use frame_support::{
storage::migration::move_pallet,
traits::{GetStorageVersion, PalletInfoAccess},
};
#[cfg(feature = "try-runtime")]
use sp_io::hashing::twox_128;
pub struct MigrateToV11<T, P, N>(sp_std::marker::PhantomData<(T, P, N)>);
impl<T: Config, P: GetStorageVersion + PalletInfoAccess, N: Get<&'static str>> OnRuntimeUpgrade
for MigrateToV11<T, P, N>
{
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V10_0_0,
"must upgrade linearly"
);
let old_pallet_prefix = twox_128(N::get().as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&old_pallet_prefix).is_some(),
"no data for the old pallet name has been detected"
);
Ok(Default::default())
}
/// Migrate the entire storage of this pallet to a new prefix.
///
/// This new prefix must be the same as the one set in construct_runtime. For safety, use
/// `PalletInfo` to get it, as:
/// `<Runtime as frame_system::Config>::PalletInfo::name::<VoterBagsList>`.
///
/// The migration will look into the storage version in order to avoid triggering a
/// migration on already up-to-date storage.
fn on_runtime_upgrade() -> Weight {
let old_pallet_name = N::get();
let new_pallet_name = <P as PalletInfoAccess>::name();
if StorageVersion::<T>::get() == ObsoleteReleases::V10_0_0 {
// bump version anyway, even if we don't need to move the prefix
StorageVersion::<T>::put(ObsoleteReleases::V11_0_0);
if new_pallet_name == old_pallet_name {
log!(
warn,
"new bags-list name is equal to the old one, only bumping the version"
);
return T::DbWeight::get().reads(1).saturating_add(T::DbWeight::get().writes(1))
}
move_pallet(old_pallet_name.as_bytes(), new_pallet_name.as_bytes());
<T as frame_system::Config>::BlockWeights::get().max_block
} else {
log!(warn, "v11::migrate should be removed.");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0,
"wrong version after the upgrade"
);
let old_pallet_name = N::get();
let new_pallet_name = <P as PalletInfoAccess>::name();
// skip storage prefix checks for the same pallet names
if new_pallet_name == old_pallet_name {
return Ok(())
}
let old_pallet_prefix = twox_128(N::get().as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&old_pallet_prefix).is_none(),
"old pallet data hasn't been removed"
);
let new_pallet_name = <P as PalletInfoAccess>::name();
let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&new_pallet_prefix).is_some(),
"new pallet data hasn't been created"
);
Ok(())
}
}
}
pub mod v10 {
use super::*;
use frame_support::storage_alias;
#[storage_alias]
type EarliestUnappliedSlash<T: Config> = StorageValue<Pallet<T>, EraIndex>;
/// Apply any pending slashes that were queued.
///
/// That means we might slash someone a bit too early, but we definitely
/// won't forget to slash them. The cap of 512 is somewhat arbitrary; it is there to
/// prevent us from iterating over an arbitrarily large number of keys in `on_runtime_upgrade`.
pub struct MigrateToV10<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV10<T> {
fn on_runtime_upgrade() -> frame_support::weights::Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V9_0_0 {
let pending_slashes = UnappliedSlashes::<T>::iter().take(512);
for (era, slashes) in pending_slashes {
for slash in slashes {
// in the old slashing scheme, the slash era was the key at which we read
// from `UnappliedSlashes`.
log!(warn, "prematurely applying a slash ({:?}) for era {:?}", slash, era);
slashing::apply_slash::<T>(slash, era);
}
}
EarliestUnappliedSlash::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V10_0_0);
log!(info, "MigrateToV10 executed successfully");
| {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V12_0_0,
"Required v12 before upgrading to v13"
);
Ok(Default::default())
} | identifier_body |
|
exec.go | command's output to the system logs, instead of the
// task logs. This can be used to collect diagnostic data in the background of a running task.
SystemLog bool `mapstructure:"system_log"`
// WorkingDir is the working directory to start the shell in.
WorkingDir string `mapstructure:"working_dir"`
// IgnoreStandardOutput and IgnoreStandardError allow users to
// elect to ignore standard output and/or standard error.
IgnoreStandardOutput bool `mapstructure:"ignore_standard_out"`
IgnoreStandardError bool `mapstructure:"ignore_standard_error"`
// RedirectStandardErrorToOutput allows you to capture
// standard error in the same stream as standard output. This
// improves the synchronization of these streams.
RedirectStandardErrorToOutput bool `mapstructure:"redirect_standard_error_to_output"`
// ContinueOnError determines whether or not a failed return code
// should cause the task to be marked as failed. Setting this to true
// allows following commands to execute even if this shell command fails.
ContinueOnError bool `mapstructure:"continue_on_err"`
// KeepEmptyArgs will allow empty arguments in commands if set to true
// note that non-blank whitespace arguments are never stripped
KeepEmptyArgs bool `mapstructure:"keep_empty_args"`
base
}
func subprocessExecFactory() Command { return &subprocessExec{} }
func (c *subprocessExec) Name() string { return "subprocess.exec" }
func (c *subprocessExec) | (params map[string]interface{}) error {
err := mapstructure.Decode(params, c)
if err != nil {
return errors.Wrap(err, "decoding mapstructure params")
}
if c.Command != "" {
if c.Binary != "" || len(c.Args) > 0 {
return errors.New("must specify command as either binary and arguments, or a command string, but not both")
}
args, err := shlex.Split(c.Command)
if err != nil {
return errors.Wrapf(err, "parsing command using shell lexing rules")
}
if len(args) == 0 {
return errors.Errorf("command could not be split using shell lexing rules")
}
c.Binary = args[0]
if len(args) > 1 {
c.Args = args[1:]
}
}
if c.Silent {
c.IgnoreStandardError = true
c.IgnoreStandardOutput = true
}
if c.IgnoreStandardOutput && c.RedirectStandardErrorToOutput {
return errors.New("cannot both ignore standard output and redirect standard error to it")
}
if c.Env == nil {
c.Env = make(map[string]string)
}
return nil
}
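// Illustrative only: per the rules in ParseParams, a project config can specify this
// command either as a single shell-lexed string or as a binary plus an args list.
// Field names follow the mapstructure tags above; the YAML layout itself is an
// assumption for the example.
//
//   - command: subprocess.exec
//     params:
//       command: "echo hello world"   # split using shell lexing rules
//
//   - command: subprocess.exec
//     params:
//       binary: echo                  # binary + args is mutually exclusive
//       args: ["hello", "world"]      # with the single command string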
func (c *subprocessExec) doExpansions(exp *util.Expansions) error {
var err error
catcher := grip.NewBasicCatcher()
c.WorkingDir, err = exp.ExpandString(c.WorkingDir)
catcher.Wrap(err, "expanding working directory")
c.Binary, err = exp.ExpandString(c.Binary)
catcher.Wrap(err, "expanding binary")
for idx := range c.Args {
c.Args[idx], err = exp.ExpandString(c.Args[idx])
catcher.Wrap(err, "expanding args")
}
for k, v := range c.Env {
c.Env[k], err = exp.ExpandString(v)
catcher.Wrap(err, "expanding environment variables")
}
for idx := range c.Path {
c.Path[idx], err = exp.ExpandString(c.Path[idx])
catcher.Wrap(err, "expanding path to add")
}
return errors.Wrap(catcher.Resolve(), "expanding strings")
}
type modifyEnvOptions struct {
taskID string
workingDir string
tmpDir string
expansions util.Expansions
includeExpansionsInEnv []string
addExpansionsToEnv bool
addToPath []string
}
func defaultAndApplyExpansionsToEnv(env map[string]string, opts modifyEnvOptions) map[string]string {
if env == nil {
env = map[string]string{}
}
if len(opts.addToPath) > 0 {
// Prepend paths to the runtime environment's PATH. More reasonable
// behavior here would be to respect the PATH env var if it's explicitly
// set for the command, but changing this could break existing
// workflows, so we don't do that.
path := make([]string, 0, len(opts.addToPath)+1)
path = append(path, opts.addToPath...)
path = append(path, os.Getenv("PATH"))
env["PATH"] = strings.Join(path, string(filepath.ListSeparator))
}
expansions := opts.expansions.Map()
if opts.addExpansionsToEnv {
for k, v := range expansions {
if k == evergreen.GlobalGitHubTokenExpansion || k == evergreen.GithubAppToken {
// users should not be able to use the global GitHub token expansion,
// as it can result in breaching Evergreen's GitHub API limit
continue
}
env[k] = v
}
}
for _, expName := range opts.includeExpansionsInEnv {
if val, ok := expansions[expName]; ok && expName != evergreen.GlobalGitHubTokenExpansion && expName != evergreen.GithubAppToken {
env[expName] = val
}
}
env[agentutil.MarkerTaskID] = opts.taskID
env[agentutil.MarkerAgentPID] = strconv.Itoa(os.Getpid())
addTempDirs(env, opts.tmpDir)
if _, ok := env["GOCACHE"]; !ok {
env["GOCACHE"] = filepath.Join(opts.workingDir, ".gocache")
}
if _, ok := env["CI"]; !ok {
env["CI"] = "true"
}
return env
}
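// Illustrative sketch of the environment produced above (all values hypothetical):
//
//     env := defaultAndApplyExpansionsToEnv(map[string]string{"FOO": "bar"}, modifyEnvOptions{
//         taskID:     "task_123",
//         workingDir: "/data/mci",
//         tmpDir:     "/data/mci/tmp",
//         addToPath:  []string{"/opt/toolchain/bin"},
//     })
//     // env keeps FOO and, in addition, contains:
//     //   PATH            - "/opt/toolchain/bin" prepended to the agent's own PATH
//     //   TMP/TMPDIR/TEMP - "/data/mci/tmp" (only where not already set)
//     //   GOCACHE, CI     - defaulted to "/data/mci/.gocache" and "true"
//     //   plus the task ID and agent PID marker variables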
func addTempDirs(env map[string]string, dir string) {
for _, key := range []string{"TMP", "TMPDIR", "TEMP"} {
if _, ok := env[key]; ok {
continue
}
env[key] = dir
}
}
func (c *subprocessExec) getProc(ctx context.Context, execPath, taskID string, logger client.LoggerProducer) *jasper.Command {
cmd := c.JasperManager().CreateCommand(ctx).Add(append([]string{execPath}, c.Args...)).
Background(c.Background).Environment(c.Env).Directory(c.WorkingDir).
SuppressStandardError(c.IgnoreStandardError).SuppressStandardOutput(c.IgnoreStandardOutput).RedirectErrorToOutput(c.RedirectStandardErrorToOutput).
ProcConstructor(func(lctx context.Context, opts *options.Create) (jasper.Process, error) {
var cancel context.CancelFunc
var ictx context.Context
if c.Background {
ictx, cancel = context.WithCancel(context.Background())
} else {
ictx = lctx
}
proc, err := c.JasperManager().CreateProcess(ictx, opts)
if err != nil {
if cancel != nil {
cancel()
}
return proc, errors.WithStack(err)
}
if cancel != nil {
grip.Warning(message.WrapError(proc.RegisterTrigger(lctx, func(info jasper.ProcessInfo) {
cancel()
}), "registering canceller for process"))
}
pid := proc.Info(ctx).PID
agentutil.TrackProcess(taskID, pid, logger.System())
if c.Background {
logger.Execution().Debugf("Running process in the background with pid %d.", pid)
} else {
logger.Execution().Infof("Started process with pid %d.", pid)
}
return proc, nil
})
if !c.IgnoreStandardOutput {
if c.SystemLog {
cmd.SetOutputSender(level.Info, logger.System().GetSender())
} else {
cmd.SetOutputSender(level.Info, logger.Task().GetSender())
}
}
if !c.IgnoreStandardError {
if c.SystemLog {
cmd.SetErrorSender(level.Error, logger.System().GetSender())
} else {
cmd.SetErrorSender(level.Error, logger.Task().GetSender())
}
}
return cmd
}
// getExecutablePath returns the path to the command executable to run.
// If the executable is available in the default runtime environment's PATH or
// it is a file path (i.e. it's not supposed to be found in the PATH), then the
// executable binary will be returned as-is. Otherwise if it can't find the
// command in the default PATH locations, the command will fall back to checking
// the command's PATH environment variable for a matching executable location
// (if any).
func (c *subprocessExec) getExecutablePath(logger client.LoggerProducer) (absPath string, err error) {
defaultPath, err := exec.LookPath(c.Binary)
if defaultPath != "" {
return c.Binary, err
}
cmdPath := c.Env["PATH"]
// For non-Windows platforms, the filepath.Separator is always '/'. However,
// for Windows, Go accepts both '\' and '/' as valid file path separators,
// even though the native filepath.Separator for Windows is really '\'. This
// detects both '\' and '/' as valid Windows file path separators.
binaryIsFilePath := strings.Contains(c.Binary, string(filepath.Separator)) || runtime.GOOS == "windows" && strings.Contains(c.Binary, "/")
if len(cmdPath) == 0 || binaryIsFilePath {
return c.Binary, nil
}
logger | ParseParams | identifier_name |
exec.go | will allow empty arguments in commands if set to true
// note that non-blank whitespace arguments are never stripped
KeepEmptyArgs bool `mapstructure:"keep_empty_args"`
base
}
func subprocessExecFactory() Command { return &subprocessExec{} }
func (c *subprocessExec) Name() string { return "subprocess.exec" }
func (c *subprocessExec) ParseParams(params map[string]interface{}) error {
err := mapstructure.Decode(params, c)
if err != nil {
return errors.Wrap(err, "decoding mapstructure params")
}
if c.Command != "" {
if c.Binary != "" || len(c.Args) > 0 {
return errors.New("must specify command as either binary and arguments, or a command string, but not both")
}
args, err := shlex.Split(c.Command)
if err != nil {
return errors.Wrapf(err, "parsing command using shell lexing rules")
}
if len(args) == 0 {
return errors.Errorf("command could not be split using shell lexing rules")
}
c.Binary = args[0]
if len(args) > 1 {
c.Args = args[1:]
}
}
if c.Silent {
c.IgnoreStandardError = true
c.IgnoreStandardOutput = true
}
if c.IgnoreStandardOutput && c.RedirectStandardErrorToOutput {
return errors.New("cannot both ignore standard output and redirect standard error to it")
}
if c.Env == nil {
c.Env = make(map[string]string)
}
return nil
}
func (c *subprocessExec) doExpansions(exp *util.Expansions) error {
var err error
catcher := grip.NewBasicCatcher()
c.WorkingDir, err = exp.ExpandString(c.WorkingDir)
catcher.Wrap(err, "expanding working directory")
c.Binary, err = exp.ExpandString(c.Binary)
catcher.Wrap(err, "expanding binary")
for idx := range c.Args {
c.Args[idx], err = exp.ExpandString(c.Args[idx])
catcher.Wrap(err, "expanding args")
}
for k, v := range c.Env {
c.Env[k], err = exp.ExpandString(v)
catcher.Wrap(err, "expanding environment variables")
}
for idx := range c.Path {
c.Path[idx], err = exp.ExpandString(c.Path[idx])
catcher.Wrap(err, "expanding path to add")
}
return errors.Wrap(catcher.Resolve(), "expanding strings")
}
type modifyEnvOptions struct {
taskID string
workingDir string
tmpDir string
expansions util.Expansions
includeExpansionsInEnv []string
addExpansionsToEnv bool
addToPath []string
}
func defaultAndApplyExpansionsToEnv(env map[string]string, opts modifyEnvOptions) map[string]string {
if env == nil {
env = map[string]string{}
}
if len(opts.addToPath) > 0 {
// Prepend paths to the runtime environment's PATH. More reasonable
// behavior here would be to respect the PATH env var if it's explicitly
// set for the command, but changing this could break existing
// workflows, so we don't do that.
path := make([]string, 0, len(opts.addToPath)+1)
path = append(path, opts.addToPath...)
path = append(path, os.Getenv("PATH"))
env["PATH"] = strings.Join(path, string(filepath.ListSeparator))
}
expansions := opts.expansions.Map()
if opts.addExpansionsToEnv {
for k, v := range expansions {
if k == evergreen.GlobalGitHubTokenExpansion || k == evergreen.GithubAppToken {
// users should not be able to use the global GitHub token expansion,
// as it can result in breaching Evergreen's GitHub API limit
continue
}
env[k] = v
}
}
for _, expName := range opts.includeExpansionsInEnv {
if val, ok := expansions[expName]; ok && expName != evergreen.GlobalGitHubTokenExpansion && expName != evergreen.GithubAppToken {
env[expName] = val
}
}
env[agentutil.MarkerTaskID] = opts.taskID
env[agentutil.MarkerAgentPID] = strconv.Itoa(os.Getpid())
addTempDirs(env, opts.tmpDir)
if _, ok := env["GOCACHE"]; !ok {
env["GOCACHE"] = filepath.Join(opts.workingDir, ".gocache")
}
if _, ok := env["CI"]; !ok {
env["CI"] = "true"
}
return env
}
func addTempDirs(env map[string]string, dir string) {
for _, key := range []string{"TMP", "TMPDIR", "TEMP"} {
if _, ok := env[key]; ok {
continue
}
env[key] = dir
}
}
func (c *subprocessExec) getProc(ctx context.Context, execPath, taskID string, logger client.LoggerProducer) *jasper.Command {
cmd := c.JasperManager().CreateCommand(ctx).Add(append([]string{execPath}, c.Args...)).
Background(c.Background).Environment(c.Env).Directory(c.WorkingDir).
SuppressStandardError(c.IgnoreStandardError).SuppressStandardOutput(c.IgnoreStandardOutput).RedirectErrorToOutput(c.RedirectStandardErrorToOutput).
ProcConstructor(func(lctx context.Context, opts *options.Create) (jasper.Process, error) {
var cancel context.CancelFunc
var ictx context.Context
if c.Background {
ictx, cancel = context.WithCancel(context.Background())
} else {
ictx = lctx
}
proc, err := c.JasperManager().CreateProcess(ictx, opts)
if err != nil {
if cancel != nil {
cancel()
}
return proc, errors.WithStack(err)
}
if cancel != nil {
grip.Warning(message.WrapError(proc.RegisterTrigger(lctx, func(info jasper.ProcessInfo) {
cancel()
}), "registering canceller for process"))
}
pid := proc.Info(ctx).PID
agentutil.TrackProcess(taskID, pid, logger.System())
if c.Background {
logger.Execution().Debugf("Running process in the background with pid %d.", pid)
} else {
logger.Execution().Infof("Started process with pid %d.", pid)
}
return proc, nil
})
if !c.IgnoreStandardOutput {
if c.SystemLog {
cmd.SetOutputSender(level.Info, logger.System().GetSender())
} else {
cmd.SetOutputSender(level.Info, logger.Task().GetSender())
}
}
if !c.IgnoreStandardError {
if c.SystemLog {
cmd.SetErrorSender(level.Error, logger.System().GetSender())
} else {
cmd.SetErrorSender(level.Error, logger.Task().GetSender())
}
}
return cmd
}
// getExecutablePath returns the path to the command executable to run.
// If the executable is available in the default runtime environment's PATH or
// it is a file path (i.e. it's not supposed to be found in the PATH), then the
// executable binary will be returned as-is. Otherwise if it can't find the
// command in the default PATH locations, the command will fall back to checking
// the command's PATH environment variable for a matching executable location
// (if any).
func (c *subprocessExec) getExecutablePath(logger client.LoggerProducer) (absPath string, err error) {
defaultPath, err := exec.LookPath(c.Binary)
if defaultPath != "" {
return c.Binary, err
}
cmdPath := c.Env["PATH"]
// For non-Windows platforms, the filepath.Separator is always '/'. However,
// for Windows, Go accepts both '\' and '/' as valid file path separators,
// even though the native filepath.Separator for Windows is really '\'. This
// detects both '\' and '/' as valid Windows file path separators.
binaryIsFilePath := strings.Contains(c.Binary, string(filepath.Separator)) || runtime.GOOS == "windows" && strings.Contains(c.Binary, "/")
if len(cmdPath) == 0 || binaryIsFilePath {
return c.Binary, nil
}
logger.Execution().Debug("could not find executable binary in the default runtime environment PATH, falling back to trying the command's PATH")
originalPath := os.Getenv("PATH")
defer func() {
// Try to reset the PATH back to its original state. If this fails, then
// the agent may have a modified PATH, which will affect all future
// agent operations that need to execute processes. However, given that
// the potential ways this could fail to reset seem highly unlikely
// (e.g. due to having insufficient memory to reset it), it doesn't
// seem worth handling in a better way.
if resetErr := os.Setenv("PATH", originalPath); resetErr != nil {
logger.Execution().Error(errors.Wrap(resetErr, "resetting agent's PATH env var back to its original state").Error())
}
}()
if err := os.Setenv("PATH", cmdPath); err != nil {
return c.Binary, errors.Wrap(err, "setting command's PATH to try fallback executable paths")
}
| return exec.LookPath(c.Binary)
} | random_line_split |
|
exec.go | command's output to the system logs, instead of the
// task logs. This can be used to collect diagnostic data in the background of a running task.
SystemLog bool `mapstructure:"system_log"`
// WorkingDir is the working directory to start the shell in.
WorkingDir string `mapstructure:"working_dir"`
// IgnoreStandardOutput and IgnoreStandardError allow users to
// elect to ignore standard output and/or standard error.
IgnoreStandardOutput bool `mapstructure:"ignore_standard_out"`
IgnoreStandardError bool `mapstructure:"ignore_standard_error"`
// RedirectStandardErrorToOutput allows you to capture
// standard error in the same stream as standard output. This
// improves the synchronization of these streams.
RedirectStandardErrorToOutput bool `mapstructure:"redirect_standard_error_to_output"`
// ContinueOnError determines whether or not a failed return code
// should cause the task to be marked as failed. Setting this to true
// allows following commands to execute even if this shell command fails.
ContinueOnError bool `mapstructure:"continue_on_err"`
// KeepEmptyArgs will allow empty arguments in commands if set to true
// note that non-blank whitespace arguments are never stripped
KeepEmptyArgs bool `mapstructure:"keep_empty_args"`
base
}
func subprocessExecFactory() Command |
func (c *subprocessExec) Name() string { return "subprocess.exec" }
func (c *subprocessExec) ParseParams(params map[string]interface{}) error {
err := mapstructure.Decode(params, c)
if err != nil {
return errors.Wrap(err, "decoding mapstructure params")
}
if c.Command != "" {
if c.Binary != "" || len(c.Args) > 0 {
return errors.New("must specify command as either binary and arguments, or a command string, but not both")
}
args, err := shlex.Split(c.Command)
if err != nil {
return errors.Wrapf(err, "parsing command using shell lexing rules")
}
if len(args) == 0 {
return errors.Errorf("command could not be split using shell lexing rules")
}
c.Binary = args[0]
if len(args) > 1 {
c.Args = args[1:]
}
}
if c.Silent {
c.IgnoreStandardError = true
c.IgnoreStandardOutput = true
}
if c.IgnoreStandardOutput && c.RedirectStandardErrorToOutput {
return errors.New("cannot both ignore standard output and redirect standard error to it")
}
if c.Env == nil {
c.Env = make(map[string]string)
}
return nil
}
func (c *subprocessExec) doExpansions(exp *util.Expansions) error {
var err error
catcher := grip.NewBasicCatcher()
c.WorkingDir, err = exp.ExpandString(c.WorkingDir)
catcher.Wrap(err, "expanding working directory")
c.Binary, err = exp.ExpandString(c.Binary)
catcher.Wrap(err, "expanding binary")
for idx := range c.Args {
c.Args[idx], err = exp.ExpandString(c.Args[idx])
catcher.Wrap(err, "expanding args")
}
for k, v := range c.Env {
c.Env[k], err = exp.ExpandString(v)
catcher.Wrap(err, "expanding environment variables")
}
for idx := range c.Path {
c.Path[idx], err = exp.ExpandString(c.Path[idx])
catcher.Wrap(err, "expanding path to add")
}
return errors.Wrap(catcher.Resolve(), "expanding strings")
}
type modifyEnvOptions struct {
taskID string
workingDir string
tmpDir string
expansions util.Expansions
includeExpansionsInEnv []string
addExpansionsToEnv bool
addToPath []string
}
func defaultAndApplyExpansionsToEnv(env map[string]string, opts modifyEnvOptions) map[string]string {
if env == nil {
env = map[string]string{}
}
if len(opts.addToPath) > 0 {
// Prepend paths to the runtime environment's PATH. More reasonable
// behavior here would be to respect the PATH env var if it's explicitly
// set for the command, but changing this could break existing
// workflows, so we don't do that.
path := make([]string, 0, len(opts.addToPath)+1)
path = append(path, opts.addToPath...)
path = append(path, os.Getenv("PATH"))
env["PATH"] = strings.Join(path, string(filepath.ListSeparator))
}
expansions := opts.expansions.Map()
if opts.addExpansionsToEnv {
for k, v := range expansions {
if k == evergreen.GlobalGitHubTokenExpansion || k == evergreen.GithubAppToken {
// users should not be able to use the global GitHub token expansion,
// as it can result in breaching Evergreen's GitHub API limit
continue
}
env[k] = v
}
}
for _, expName := range opts.includeExpansionsInEnv {
if val, ok := expansions[expName]; ok && expName != evergreen.GlobalGitHubTokenExpansion && expName != evergreen.GithubAppToken {
env[expName] = val
}
}
env[agentutil.MarkerTaskID] = opts.taskID
env[agentutil.MarkerAgentPID] = strconv.Itoa(os.Getpid())
addTempDirs(env, opts.tmpDir)
if _, ok := env["GOCACHE"]; !ok {
env["GOCACHE"] = filepath.Join(opts.workingDir, ".gocache")
}
if _, ok := env["CI"]; !ok {
env["CI"] = "true"
}
return env
}
func addTempDirs(env map[string]string, dir string) {
for _, key := range []string{"TMP", "TMPDIR", "TEMP"} {
if _, ok := env[key]; ok {
continue
}
env[key] = dir
}
}
func (c *subprocessExec) getProc(ctx context.Context, execPath, taskID string, logger client.LoggerProducer) *jasper.Command {
cmd := c.JasperManager().CreateCommand(ctx).Add(append([]string{execPath}, c.Args...)).
Background(c.Background).Environment(c.Env).Directory(c.WorkingDir).
SuppressStandardError(c.IgnoreStandardError).SuppressStandardOutput(c.IgnoreStandardOutput).RedirectErrorToOutput(c.RedirectStandardErrorToOutput).
ProcConstructor(func(lctx context.Context, opts *options.Create) (jasper.Process, error) {
var cancel context.CancelFunc
var ictx context.Context
if c.Background {
ictx, cancel = context.WithCancel(context.Background())
} else {
ictx = lctx
}
proc, err := c.JasperManager().CreateProcess(ictx, opts)
if err != nil {
if cancel != nil {
cancel()
}
return proc, errors.WithStack(err)
}
if cancel != nil {
grip.Warning(message.WrapError(proc.RegisterTrigger(lctx, func(info jasper.ProcessInfo) {
cancel()
}), "registering canceller for process"))
}
pid := proc.Info(ctx).PID
agentutil.TrackProcess(taskID, pid, logger.System())
if c.Background {
logger.Execution().Debugf("Running process in the background with pid %d.", pid)
} else {
logger.Execution().Infof("Started process with pid %d.", pid)
}
return proc, nil
})
if !c.IgnoreStandardOutput {
if c.SystemLog {
cmd.SetOutputSender(level.Info, logger.System().GetSender())
} else {
cmd.SetOutputSender(level.Info, logger.Task().GetSender())
}
}
if !c.IgnoreStandardError {
if c.SystemLog {
cmd.SetErrorSender(level.Error, logger.System().GetSender())
} else {
cmd.SetErrorSender(level.Error, logger.Task().GetSender())
}
}
return cmd
}
// getExecutablePath returns the path to the command executable to run.
// If the executable is available in the default runtime environment's PATH or
// it is a file path (i.e. it's not supposed to be found in the PATH), then the
// executable binary will be returned as-is. Otherwise if it can't find the
// command in the default PATH locations, the command will fall back to checking
// the command's PATH environment variable for a matching executable location
// (if any).
func (c *subprocessExec) getExecutablePath(logger client.LoggerProducer) (absPath string, err error) {
defaultPath, err := exec.LookPath(c.Binary)
if defaultPath != "" {
return c.Binary, err
}
cmdPath := c.Env["PATH"]
// For non-Windows platforms, the filepath.Separator is always '/'. However,
// for Windows, Go accepts both '\' and '/' as valid file path separators,
// even though the native filepath.Separator for Windows is really '\'. This
// detects both '\' and '/' as valid Windows file path separators.
binaryIsFilePath := strings.Contains(c.Binary, string(filepath.Separator)) || runtime.GOOS == "windows" && strings.Contains(c.Binary, "/")
if len(cmdPath) == 0 || binaryIsFilePath {
return c.Binary, nil
}
logger | { return &subprocessExec{} } | identifier_body |
exec.go | command's output to the system logs, instead of the
// task logs. This can be used to collect diagnostic data in the background of a running task.
SystemLog bool `mapstructure:"system_log"`
// WorkingDir is the working directory to start the shell in.
WorkingDir string `mapstructure:"working_dir"`
// IgnoreStandardOutput and IgnoreStandardError allow users to
// elect to ignore standard output and/or standard error.
IgnoreStandardOutput bool `mapstructure:"ignore_standard_out"`
IgnoreStandardError bool `mapstructure:"ignore_standard_error"`
// RedirectStandardErrorToOutput allows you to capture
// standard error in the same stream as standard output. This
// improves the synchronization of these streams.
RedirectStandardErrorToOutput bool `mapstructure:"redirect_standard_error_to_output"`
// ContinueOnError determines whether or not a failed return code
// should cause the task to be marked as failed. Setting this to true
// allows following commands to execute even if this shell command fails.
ContinueOnError bool `mapstructure:"continue_on_err"`
// KeepEmptyArgs will allow empty arguments in commands if set to true
// note that non-blank whitespace arguments are never stripped
KeepEmptyArgs bool `mapstructure:"keep_empty_args"`
base
}
func subprocessExecFactory() Command { return &subprocessExec{} }
func (c *subprocessExec) Name() string { return "subprocess.exec" }
func (c *subprocessExec) ParseParams(params map[string]interface{}) error {
err := mapstructure.Decode(params, c)
if err != nil {
return errors.Wrap(err, "decoding mapstructure params")
}
if c.Command != "" {
if c.Binary != "" || len(c.Args) > 0 {
return errors.New("must specify command as either binary and arguments, or a command string, but not both")
}
args, err := shlex.Split(c.Command)
if err != nil {
return errors.Wrapf(err, "parsing command using shell lexing rules")
}
if len(args) == 0 {
return errors.Errorf("command could not be split using shell lexing rules")
}
c.Binary = args[0]
if len(args) > 1 {
c.Args = args[1:]
}
}
if c.Silent {
c.IgnoreStandardError = true
c.IgnoreStandardOutput = true
}
if c.IgnoreStandardOutput && c.RedirectStandardErrorToOutput {
return errors.New("cannot both ignore standard output and redirect standard error to it")
}
if c.Env == nil {
c.Env = make(map[string]string)
}
return nil
}
func (c *subprocessExec) doExpansions(exp *util.Expansions) error {
var err error
catcher := grip.NewBasicCatcher()
c.WorkingDir, err = exp.ExpandString(c.WorkingDir)
catcher.Wrap(err, "expanding working directory")
c.Binary, err = exp.ExpandString(c.Binary)
catcher.Wrap(err, "expanding binary")
for idx := range c.Args {
c.Args[idx], err = exp.ExpandString(c.Args[idx])
catcher.Wrap(err, "expanding args")
}
for k, v := range c.Env {
c.Env[k], err = exp.ExpandString(v)
catcher.Wrap(err, "expanding environment variables")
}
for idx := range c.Path {
c.Path[idx], err = exp.ExpandString(c.Path[idx])
catcher.Wrap(err, "expanding path to add")
}
return errors.Wrap(catcher.Resolve(), "expanding strings")
}
type modifyEnvOptions struct {
taskID string
workingDir string
tmpDir string
expansions util.Expansions
includeExpansionsInEnv []string
addExpansionsToEnv bool
addToPath []string
}
func defaultAndApplyExpansionsToEnv(env map[string]string, opts modifyEnvOptions) map[string]string {
if env == nil {
env = map[string]string{}
}
if len(opts.addToPath) > 0 {
// Prepend paths to the runtime environment's PATH. More reasonable
// behavior here would be to respect the PATH env var if it's explicitly
// set for the command, but changing this could break existing
// workflows, so we don't do that.
path := make([]string, 0, len(opts.addToPath)+1)
path = append(path, opts.addToPath...)
path = append(path, os.Getenv("PATH"))
env["PATH"] = strings.Join(path, string(filepath.ListSeparator))
}
expansions := opts.expansions.Map()
if opts.addExpansionsToEnv {
for k, v := range expansions {
if k == evergreen.GlobalGitHubTokenExpansion || k == evergreen.GithubAppToken {
// users should not be able to use the global GitHub token expansion,
// as it can result in breaching Evergreen's GitHub API limit
continue
}
env[k] = v
}
}
for _, expName := range opts.includeExpansionsInEnv {
if val, ok := expansions[expName]; ok && expName != evergreen.GlobalGitHubTokenExpansion && expName != evergreen.GithubAppToken {
env[expName] = val
}
}
env[agentutil.MarkerTaskID] = opts.taskID
env[agentutil.MarkerAgentPID] = strconv.Itoa(os.Getpid())
addTempDirs(env, opts.tmpDir)
if _, ok := env["GOCACHE"]; !ok {
env["GOCACHE"] = filepath.Join(opts.workingDir, ".gocache")
}
if _, ok := env["CI"]; !ok {
env["CI"] = "true"
}
return env
}
func addTempDirs(env map[string]string, dir string) {
for _, key := range []string{"TMP", "TMPDIR", "TEMP"} {
if _, ok := env[key]; ok {
continue
}
env[key] = dir
}
}
func (c *subprocessExec) getProc(ctx context.Context, execPath, taskID string, logger client.LoggerProducer) *jasper.Command {
cmd := c.JasperManager().CreateCommand(ctx).Add(append([]string{execPath}, c.Args...)).
Background(c.Background).Environment(c.Env).Directory(c.WorkingDir).
SuppressStandardError(c.IgnoreStandardError).SuppressStandardOutput(c.IgnoreStandardOutput).RedirectErrorToOutput(c.RedirectStandardErrorToOutput).
ProcConstructor(func(lctx context.Context, opts *options.Create) (jasper.Process, error) {
var cancel context.CancelFunc
var ictx context.Context
if c.Background {
ictx, cancel = context.WithCancel(context.Background())
} else {
ictx = lctx
}
proc, err := c.JasperManager().CreateProcess(ictx, opts)
if err != nil {
if cancel != nil {
cancel()
}
return proc, errors.WithStack(err)
}
if cancel != nil {
grip.Warning(message.WrapError(proc.RegisterTrigger(lctx, func(info jasper.ProcessInfo) {
cancel()
}), "registering canceller for process"))
}
pid := proc.Info(ctx).PID
agentutil.TrackProcess(taskID, pid, logger.System())
if c.Background {
logger.Execution().Debugf("Running process in the background with pid %d.", pid)
} else {
logger.Execution().Infof("Started process with pid %d.", pid)
}
return proc, nil
})
if !c.IgnoreStandardOutput {
if c.SystemLog {
cmd.SetOutputSender(level.Info, logger.System().GetSender())
} else {
cmd.SetOutputSender(level.Info, logger.Task().GetSender())
}
}
if !c.IgnoreStandardError {
if c.SystemLog {
cmd.SetErrorSender(level.Error, logger.System().GetSender())
} else {
cmd.SetErrorSender(level.Error, logger.Task().GetSender())
}
}
return cmd
}
// getExecutablePath returns the path to the command executable to run.
// If the executable is available in the default runtime environment's PATH or
// it is a file path (i.e. it's not supposed to be found in the PATH), then the
// executable binary will be returned as-is. Otherwise if it can't find the
// command in the default PATH locations, the command will fall back to checking
// the command's PATH environment variable for a matching executable location
// (if any).
func (c *subprocessExec) getExecutablePath(logger client.LoggerProducer) (absPath string, err error) {
defaultPath, err := exec.LookPath(c.Binary)
if defaultPath != "" |
cmdPath := c.Env["PATH"]
// For non-Windows platforms, the filepath.Separator is always '/'. However,
// for Windows, Go accepts both '\' and '/' as valid file path separators,
// even though the native filepath.Separator for Windows is really '\'. This
// detects both '\' and '/' as valid Windows file path separators.
binaryIsFilePath := strings.Contains(c.Binary, string(filepath.Separator)) || runtime.GOOS == "windows" && strings.Contains(c.Binary, "/")
if len(cmdPath) == 0 || binaryIsFilePath {
return c.Binary, nil
}
| {
return c.Binary, err
} | conditional_block |
Randomizer.py | = "none"
# clears recording constraints
Selector_Screen.current_song = "none"
Selector_Screen.current_diff = "none"
Selector_Screen.report_mode = False
Selector_Screen.match_winner = "none"
Selector_Screen.player_array = ['none','none']
# clears entry boxes and radio buttons
self.Game_Mode.set(0)
self.Min_Difficulty.delete(0,END)
self.Max_Difficulty.delete(0,END)
self.Player_One_Entry.delete(0,END)
self.Player_Two_Entry.delete(0,END)
self.Selected_Song['image'] = self.Main_Menu
# refreshes entry boxes and splash image
self.Min_Difficulty.update()
self.Max_Difficulty.update()
self.Selected_Song.update()
# popup asking and returning the winner of the match
def | (self,parent):
# button function: saves winner to class variable and closes popup
def return_value(event='<Button-1>'):
# if button is selected
if winner.get() != 0:
# if not a draw
if winner.get() != 3:
Selector_Screen.match_winner = Selector_Screen.player_array[winner.get() - 1]
# if a draw
else:
Selector_Screen.match_winner = "Draw"
# close popup
self.win.destroy()
# creates popup window
self.win = Toplevel(parent)
self.win.wm_title("Select Winner")
# button images
self.Continue = Tkinter.PhotoImage(file="./Graphics/Continue.gif")
self.Cancel = Tkinter.PhotoImage(file="./Graphics/Cancel.gif")
# initialize winner radio button variable
winner = IntVar()
winner.set(0)
# construct frame for widgets
self.Inner_Frame = Frame(self.win)
self.Inner_Frame.grid(row=0,column=0)
# asks who the winner is
Winner_Label = Label(self.Inner_Frame, text="The winner is:")
Winner_Label.grid(row=0, column=0,columnspan=2)
Winner_Label.configure(font=("TkDefaultFont",24))
# player one, two, and draw radio buttons
self.Player_One__Winner = Radiobutton(self.Inner_Frame, text=Selector_Screen.player_array[0], variable=winner, value=1)
self.Player_Two__Winner = Radiobutton(self.Inner_Frame, text=Selector_Screen.player_array[1], variable=winner, value=2)
self.Draw = Radiobutton(self.Inner_Frame, text="Draw", variable=winner, value=3)
# places radio buttons
self.Player_One__Winner.grid(row=1,column=0,pady=(25,25))
self.Player_Two__Winner.grid(row=1,column=1,pady=(25,25))
self.Draw.grid(row=2,column=0,pady=(0,25),columnspan=2)
# configures radio button fonts
self.Player_One__Winner.configure(font=("TkDefaultFont",24))
self.Player_Two__Winner.configure(font=("TkDefaultFont",24))
self.Draw.configure(font=("TkDefaultFont",24))
# creates and places continue button
Select_Button = Button(self.Inner_Frame, image=self.Continue, command=return_value)
Select_Button.grid(row=3, column=0)
# creates and places cancel button
Cancel_Button = Button(self.Inner_Frame, image=self.Cancel, command=self.win.destroy)
Cancel_Button.grid(row=3, column=1)
# asks for winner and saves match information in tournament log file
def Report_Match(self,event='<Button-1>'):
# if there isn't a song selected, do nothing
if not Selector_Screen.report_mode:
return
# initialize the time of the match
time_of_match = str(datetime.datetime.now().time()).split(".")[0]
# ask for the winner
self.Prompt_Winner(self.master)
self.master.wait_window(self.win)
# initialize list of players
name_list = ""
# convert the time of day into 12 hour format
time_array = time_of_match.split(":")
if int(time_array[0]) > 12:
time_of_match = "%s:%s PM" % (str(int(time_array[0]) - 12), time_array[1])
elif int(time_array[0]) < 12 and int(time_array[0]) != 0:
time_of_match = "%s:%s AM" % (time_array[0], time_array[1])
elif int(time_array[0]) == 12:
time_of_match = "%s:%s PM" % (time_array[0], time_array[1])
elif int(time_array[0]) == 0:
time_of_match = "%s:%s AM" % (str(int(time_array[0]) + 12), time_array[1])
# save list of players
for names in Selector_Screen.player_array:
name_list = name_list + names + ", "
name_list = name_list[:-2]
# writes the song played, who played the song, who won the match, and when the match was
Tournament_Log.write("Song: %s %s\n" % (Selector_Screen.current_song,Selector_Screen.current_diff))
Tournament_Log.write("Players: %s\n" % (name_list))
Tournament_Log.write("Winner: %s\n" % (Selector_Screen.match_winner))
Tournament_Log.write("Time of Match: %s\n\n" % (time_of_match))
# writes results to meta file for long-term statistics
Meta_Log.write("%s,%s,%s,%s,%s,%s,\n" % (
Selector_Screen.current_song,
Selector_Screen.diff_mode,
Selector_Screen.current_diff[1:],
tournament_name,
Selector_Screen.match_winner,
name_list
)
)
# resets song
Selector_Screen.current_song = "none"
Selector_Screen.current_diff = "none"
Selector_Screen.report_mode = False
self.Selected_Song['image'] = self.Main_Menu
# resets players
self.Player_One_Entry.delete(0,END)
self.Player_Two_Entry.delete(0,END)
Selector_Screen.player_array = ['none','none']
# refreshes entry boxes and splash screen
self.Player_One_Entry.update()
self.Player_Two_Entry.update()
self.Selected_Song.update()
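# Illustrative example of the records written above, with hypothetical values
# (players "Alice" and "Bob", song "Example Song", and current_diff assumed to
# look like "S18", i.e. a mode letter followed by the level):
#
#   Tournament_Log:
#       Song: Example Song S18
#       Players: Alice, Bob
#       Winner: Alice
#       Time of Match: 7:42 PM
#
#   Meta_Log (one comma-separated row; note that name_list itself contains ", "):
#       Example Song,S,18,<tournament_name>,Alice,Alice, Bob,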
# randomly selects song in range and displays on the screen
def Select_Song(self,event='<Button-1>'):
# check for required values before continuing
if self.Min_Difficulty.get() != "" and self.Min_Difficulty.get().isdigit():
Selector_Screen.min_diff = int(self.Min_Difficulty.get())
else:
return
if self.Max_Difficulty.get() != "" and self.Max_Difficulty.get().isdigit():
Selector_Screen.max_diff = int(self.Max_Difficulty.get())
else:
return
if self.Game_Mode.get() != 0:
Selector_Screen.diff_mode = song_int_dict[self.Game_Mode.get()]
else:
return
if self.Player_One_Entry.get() != "" and self.Player_Two_Entry.get() != "":
Selector_Screen.player_array = [self.Player_One_Entry.get(),self.Player_Two_Entry.get()]
#call required parameters from class variables
min_difficulty = Selector_Screen.min_diff
max_difficulty = Selector_Screen.max_diff
difficulty_mode = Selector_Screen.diff_mode
difficulty_level = randint(min_difficulty,max_difficulty)
# cycles through the difficulties to look for the correct mode and level
for difficulties in os.listdir(song_path):
# singles/doubles and #1-26
target_mode = difficulties.split("_")[0]
target_level = int(difficulties.split("_")[1])
# if there's a match, search for a song at random
if target_mode == difficulty_mode and target_level == difficulty_level:
# save path to mode_level folder
target_path = "%s/%s/" % (song_path,difficulties)
total_songs = len(os.listdir(target_path))
# choose a random song in the folder
target_song_index = randint(0,total_songs - 1)
target_song_path = sorted(glob.glob(target_path + "*.JPG"))[target_song_index]
# retrieve information from the song filename
song_information = ((os.path.basename(target_song_path)).split(".")[0]).split("_")
# convert song name from dash to space format
song_name = ""
song_name_information = song_information[0].split("-")
for words in song_name_information:
song_name = song_name + words + " "
song_name = song_name[:-1]
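# Illustrative example with a hypothetical filename "Final-Audition_BanYa_170.JPG":
#   song_information -> ["Final-Audition", "BanYa", "170"]
#   song_name        -> "Final Audition"
# the remaining underscore-separated fields (composer, bpm) are read next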
# retrieve song composer and bpm
song_composer = song_information[1]
s | Prompt_Winner | identifier_name |
Randomizer.py | = "none"
# clears recording constraints
Selector_Screen.current_song = "none"
Selector_Screen.current_diff = "none"
Selector_Screen.report_mode = False
Selector_Screen.match_winner = "none"
Selector_Screen.player_array = ['none','none']
# clears entry boxes and radio buttons
self.Game_Mode.set(0)
self.Min_Difficulty.delete(0,END)
self.Max_Difficulty.delete(0,END)
self.Player_One_Entry.delete(0,END)
self.Player_Two_Entry.delete(0,END)
self.Selected_Song['image'] = self.Main_Menu
# refreshes entry boxes and splash image
self.Min_Difficulty.update()
self.Max_Difficulty.update()
self.Selected_Song.update()
# popup asking and returning the winner of the match
def Prompt_Winner(self,parent):
# button function: saves winner to class variable and closes popup
def return_value(event='<Button-1>'):
# if button is selected
if winner.get() != 0:
# if not a draw
if winner.get() != 3:
|
# if a draw
else:
Selector_Screen.match_winner = "Draw"
# close popup
self.win.destroy()
# creates popup window
self.win = Toplevel(parent)
self.win.wm_title("Select Winner")
# button images
self.Continue = Tkinter.PhotoImage(file="./Graphics/Continue.gif")
self.Cancel = Tkinter.PhotoImage(file="./Graphics/Cancel.gif")
# initialize winner radio button variable
winner = IntVar()
winner.set(0)
# construct frame for widgets
self.Inner_Frame = Frame(self.win)
self.Inner_Frame.grid(row=0,column=0)
# asks who the winner is
Winner_Label = Label(self.Inner_Frame, text="The winner is:")
Winner_Label.grid(row=0, column=0,columnspan=2)
Winner_Label.configure(font=("TkDefaultFont",24))
# player one, two, and draw radio buttons
self.Player_One__Winner = Radiobutton(self.Inner_Frame, text=Selector_Screen.player_array[0], variable=winner, value=1)
self.Player_Two__Winner = Radiobutton(self.Inner_Frame, text=Selector_Screen.player_array[1], variable=winner, value=2)
self.Draw = Radiobutton(self.Inner_Frame, text="Draw", variable=winner, value=3)
# places radio buttons
self.Player_One__Winner.grid(row=1,column=0,pady=(25,25))
self.Player_Two__Winner.grid(row=1,column=1,pady=(25,25))
self.Draw.grid(row=2,column=0,pady=(0,25),columnspan=2)
# configures radio button fonts
self.Player_One__Winner.configure(font=("TkDefaultFont",24))
self.Player_Two__Winner.configure(font=("TkDefaultFont",24))
self.Draw.configure(font=("TkDefaultFont",24))
# creates and places continue button
Select_Button = Button(self.Inner_Frame, image=self.Continue, command=return_value)
Select_Button.grid(row=3, column=0)
# creates and places cancel button
Cancel_Button = Button(self.Inner_Frame, image=self.Cancel, command=self.win.destroy)
Cancel_Button.grid(row=3, column=1)
# asks for winner and saves match information in tournament log file
def Report_Match(self,event='<Button-1>'):
# if there isn't a song selected, do nothing
if not Selector_Screen.report_mode:
return
# initialize the time of the match
time_of_match = str(datetime.datetime.now().time()).split(".")[0]
# ask for the winner
self.Prompt_Winner(self.master)
self.master.wait_window(self.win)
# initialize list of players
name_list = ""
# convert the time of day into 12 hour format
time_array = time_of_match.split(":")
if int(time_array[0]) > 12:
time_of_match = "%s:%s PM" % (str(int(time_array[0]) - 12), time_array[1])
elif int(time_array[0]) < 12 and int(time_array[0]) != 0:
time_of_match = "%s:%s AM" % (time_array[0], time_array[1])
elif int(time_array[0]) == 12:
time_of_match = "%s:%s PM" % (time_array[0], time_array[1])
elif int(time_array[0]) == 0:
time_of_match = "%s:%s AM" % (str(int(time_array[0]) + 12), time_array[1])
# save list of players
for names in Selector_Screen.player_array:
name_list = name_list + names + ", "
name_list = name_list[:-2]
# writes the song played, who played the song, who won the match, and when the match was
Tournament_Log.write("Song: %s %s\n" % (Selector_Screen.current_song,Selector_Screen.current_diff))
Tournament_Log.write("Players: %s\n" % (name_list))
Tournament_Log.write("Winner: %s\n" % (Selector_Screen.match_winner))
Tournament_Log.write("Time of Match: %s\n\n" % (time_of_match))
# writes results to meta file for long-term statistics
Meta_Log.write("%s,%s,%s,%s,%s,%s,\n" % (
Selector_Screen.current_song,
Selector_Screen.diff_mode,
Selector_Screen.current_diff[1:],
tournament_name,
Selector_Screen.match_winner,
name_list
)
)
# resets song
Selector_Screen.current_song = "none"
Selector_Screen.current_diff = "none"
Selector_Screen.report_mode = False
self.Selected_Song['image'] = self.Main_Menu
# resets players
self.Player_One_Entry.delete(0,END)
self.Player_Two_Entry.delete(0,END)
Selector_Screen.player_array = ['none','none']
# refreshes entry boxes and splash screen
self.Player_One_Entry.update()
self.Player_Two_Entry.update()
self.Selected_Song.update()
# randomly selects song in range and displays on the screen
def Select_Song(self,event='<Button-1>'):
# check for required values before continuing
if self.Min_Difficulty.get() != "" and self.Min_Difficulty.get().isdigit():
Selector_Screen.min_diff = int(self.Min_Difficulty.get())
else:
return
if self.Max_Difficulty.get() != "" and self.Max_Difficulty.get().isdigit():
Selector_Screen.max_diff = int(self.Max_Difficulty.get())
else:
return
if self.Game_Mode.get() != 0:
Selector_Screen.diff_mode = song_int_dict[self.Game_Mode.get()]
else:
return
if self.Player_One_Entry.get() != "" and self.Player_Two_Entry.get() != "":
Selector_Screen.player_array = [self.Player_One_Entry.get(),self.Player_Two_Entry.get()]
#call required parameters from class variables
min_difficulty = Selector_Screen.min_diff
max_difficulty = Selector_Screen.max_diff
difficulty_mode = Selector_Screen.diff_mode
difficulty_level = randint(min_difficulty,max_difficulty)
# cycles through the difficulties to look for the correct mode and level
for difficulties in os.listdir(song_path):
# singles/doubles and #1-26
target_mode = difficulties.split("_")[0]
target_level = int(difficulties.split("_")[1])
# if there's a match, search for a song at random
if target_mode == difficulty_mode and target_level == difficulty_level:
# save path to mode_level folder
target_path = "%s/%s/" % (song_path,difficulties)
total_songs = len(os.listdir(target_path))
# choose a random song in the folder
target_song_index = randint(0,total_songs - 1)
target_song_path = sorted(glob.glob(target_path + "*.JPG"))[target_song_index]
# retrieve information from the song filename
song_information = ((os.path.basename(target_song_path)).split(".")[0]).split("_")
# convert song name from dash to space format
song_name = ""
song_name_information = song_information[0].split("-")
for words in song_name_information:
song_name = song_name + words + " "
song_name = song_name[:-1]
# retrieve song composer and bpm
song_composer = song_information[1]
s | Selector_Screen.match_winner = Selector_Screen.player_array[winner.get() - 1] | conditional_block |
Randomizer.py | = "none"
# clears recording constraints
Selector_Screen.current_song = "none"
Selector_Screen.current_diff = "none"
Selector_Screen.report_mode = False
Selector_Screen.match_winner = "none"
Selector_Screen.player_array = ['none','none']
# clears entry boxes and radio buttons
self.Game_Mode.set(0)
self.Min_Difficulty.delete(0,END)
self.Max_Difficulty.delete(0,END)
self.Player_One_Entry.delete(0,END)
self.Player_Two_Entry.delete(0,END)
self.Selected_Song['image'] = self.Main_Menu
# refreshes entry boxes and splash image
self.Min_Difficulty.update()
self.Max_Difficulty.update()
self.Selected_Song.update()
# popup asking and returning the winner of the match
def Prompt_Winner(self,parent):
# button function: saves winner to class variable and closes popup
def return_value(event='<Button-1>'):
# if button is selected
if winner.get() != 0:
# if not a draw
if winner.get() != 3:
Selector_Screen.match_winner = Selector_Screen.player_array[winner.get() - 1]
# if a draw
else:
Selector_Screen.match_winner = "Draw"
# close popup
self.win.destroy()
# creates popup window
self.win = Toplevel(parent)
self.win.wm_title("Select Winner")
# button images
self.Continue = Tkinter.PhotoImage(file="./Graphics/Continue.gif")
self.Cancel = Tkinter.PhotoImage(file="./Graphics/Cancel.gif") | winner.set(0)
# construct frame for widgets
self.Inner_Frame = Frame(self.win)
self.Inner_Frame.grid(row=0,column=0)
# asks who the winner is
Winner_Label = Label(self.Inner_Frame, text="The winner is:")
Winner_Label.grid(row=0, column=0,columnspan=2)
Winner_Label.configure(font=("TkDefaultFont",24))
# player one, two, and draw radio buttons
self.Player_One__Winner = Radiobutton(self.Inner_Frame, text=Selector_Screen.player_array[0], variable=winner, value=1)
self.Player_Two__Winner = Radiobutton(self.Inner_Frame, text=Selector_Screen.player_array[1], variable=winner, value=2)
self.Draw = Radiobutton(self.Inner_Frame, text="Draw", variable=winner, value=3)
# places radio buttons
self.Player_One__Winner.grid(row=1,column=0,pady=(25,25))
self.Player_Two__Winner.grid(row=1,column=1,pady=(25,25))
self.Draw.grid(row=2,column=0,pady=(0,25),columnspan=2)
# configures radio button fonts
self.Player_One__Winner.configure(font=("TkDefaultFont",24))
self.Player_Two__Winner.configure(font=("TkDefaultFont",24))
self.Draw.configure(font=("TkDefaultFont",24))
# creates and places continue button
Select_Button = Button(self.Inner_Frame, image=self.Continue, command=return_value)
Select_Button.grid(row=3, column=0)
# creates and places cancel button
Cancel_Button = Button(self.Inner_Frame, image=self.Cancel, command=self.win.destroy)
Cancel_Button.grid(row=3, column=1)
# asks for winner and saves match information in tournament log file
def Report_Match(self,event='<Button-1>'):
# if there isn't a song selected, do nothing
if not Selector_Screen.report_mode:
return
# initialize the time of the match
time_of_match = str(datetime.datetime.now().time()).split(".")[0]
# ask for the winner
self.Prompt_Winner(self.master)
self.master.wait_window(self.win)
# initialize list of players
name_list = ""
# convert the time of day into 12 hour format
time_array = time_of_match.split(":")
if int(time_array[0]) > 12:
time_of_match = "%s:%s PM" % (str(int(time_array[0]) - 12), time_array[1])
elif int(time_array[0]) < 12 and int(time_array[0]) != 0:
time_of_match = "%s:%s AM" % (time_array[0], time_array[1])
elif int(time_array[0]) == 12:
time_of_match = "%s:%s PM" % (time_array[0], time_array[1])
elif int(time_array[0]) == 0:
time_of_match = "%s:%s AM" % (str(int(time_array[0]) + 12), time_array[1])
# save list of players
for names in Selector_Screen.player_array:
name_list = name_list + names + ", "
name_list = name_list[:-2]
# writes the song played, who played the song, who won the match, and when the match was played
Tournament_Log.write("Song: %s %s\n" % (Selector_Screen.current_song,Selector_Screen.current_diff))
Tournament_Log.write("Players: %s\n" % (name_list))
Tournament_Log.write("Winner: %s\n" % (Selector_Screen.match_winner))
Tournament_Log.write("Time of Match: %s\n\n" % (time_of_match))
# writes results to meta file for long-term statistics
Meta_Log.write("%s,%s,%s,%s,%s,%s,\n" % (
Selector_Screen.current_song,
Selector_Screen.diff_mode,
Selector_Screen.current_diff[1:],
tournament_name,
Selector_Screen.match_winner,
name_list
)
)
# resets song
Selector_Screen.current_song = "none"
Selector_Screen.current_diff = "none"
Selector_Screen.report_mode = False
self.Selected_Song['image'] = self.Main_Menu
# resets players
self.Player_One_Entry.delete(0,END)
self.Player_Two_Entry.delete(0,END)
Selector_Screen.player_array = ['none','none']
# refreshes entry boxes and splash screen
self.Player_One_Entry.update()
self.Player_Two_Entry.update()
self.Selected_Song.update()
# randomly selects song in range and displays on the screen
def Select_Song(self,event='<Button-1>'):
# check for required values before continuing
if self.Min_Difficulty.get() != "" and self.Min_Difficulty.get().isdigit():
Selector_Screen.min_diff = int(self.Min_Difficulty.get())
else:
return
if self.Max_Difficulty.get() != "" and self.Max_Difficulty.get().isdigit():
Selector_Screen.max_diff = int(self.Max_Difficulty.get())
else:
return
if self.Game_Mode.get() != 0:
Selector_Screen.diff_mode = song_int_dict[self.Game_Mode.get()]
else:
return
if self.Player_One_Entry.get() != "" and self.Player_Two_Entry.get() != "":
Selector_Screen.player_array = [self.Player_One_Entry.get(),self.Player_Two_Entry.get()]
#call required parameters from class variables
min_difficulty = Selector_Screen.min_diff
max_difficulty = Selector_Screen.max_diff
difficulty_mode = Selector_Screen.diff_mode
difficulty_level = randint(min_difficulty,max_difficulty)
# cycles through the difficulties to look for the correct mode and level
for difficulties in os.listdir(song_path):
# singles/doubles and #1-26
target_mode = difficulties.split("_")[0]
target_level = int(difficulties.split("_")[1])
# if there's a match, search for a song at random
if target_mode == difficulty_mode and target_level == difficulty_level:
# save path to mode_level folder
target_path = "%s/%s/" % (song_path,difficulties)
total_songs = len(os.listdir(target_path))
#choose a random song in the folder
target_song_index = randint(0,total_songs - 1)
target_song_path = sorted(glob.glob(target_path + "*.JPG"))[target_song_index]
# retrieve information from the song filename
song_information = ((os.path.basename(target_song_path)).split(".")[0]).split("_")
# convert song name from dash to space format
song_name = ""
song_name_information = song_information[0].split("-")
for words in song_name_information:
song_name = song_name + words + " "
song_name = song_name[:-1]
# retrieve song composer and bpm
song_composer = song_information[1]
s |
# initialize winner radio button variable
winner = IntVar() | random_line_split |
Randomizer.py | = "none"
# clears recording constraints
Selector_Screen.current_song = "none"
Selector_Screen.current_diff = "none"
Selector_Screen.report_mode = False
Selector_Screen.match_winner = "none"
Selector_Screen.player_array = ['none','none']
# clears entry boxes and radio buttons
self.Game_Mode.set(0)
self.Min_Difficulty.delete(0,END)
self.Max_Difficulty.delete(0,END)
self.Player_One_Entry.delete(0,END)
self.Player_Two_Entry.delete(0,END)
self.Selected_Song['image'] = self.Main_Menu
# refreshes entry boxes and splash image
self.Min_Difficulty.update()
self.Max_Difficulty.update()
self.Selected_Song.update()
# popup asking and returning the winner of the match
def Prompt_Winner(self,parent):
# button function: saves winner to class variable and closes popup
| # button images
self.Continue = Tkinter.PhotoImage(file="./Graphics/Continue.gif")
self.Cancel = Tkinter.PhotoImage(file="./Graphics/Cancel.gif")
# initialize winner radio button variable
winner = IntVar()
winner.set(0)
# construct frame for widgets
self.Inner_Frame = Frame(self.win)
self.Inner_Frame.grid(row=0,column=0)
# asks who the winner is
Winner_Label = Label(self.Inner_Frame, text="The winner is:")
Winner_Label.grid(row=0, column=0,columnspan=2)
Winner_Label.configure(font=("TkDefaultFont",24))
# player one, two, and draw radio buttons
self.Player_One__Winner = Radiobutton(self.Inner_Frame, text=Selector_Screen.player_array[0], variable=winner, value=1)
self.Player_Two__Winner = Radiobutton(self.Inner_Frame, text=Selector_Screen.player_array[1], variable=winner, value=2)
self.Draw = Radiobutton(self.Inner_Frame, text="Draw", variable=winner, value=3)
# places radio buttons
self.Player_One__Winner.grid(row=1,column=0,pady=(25,25))
self.Player_Two__Winner.grid(row=1,column=1,pady=(25,25))
self.Draw.grid(row=2,column=0,pady=(0,25),columnspan=2)
# configures radio button fonts
self.Player_One__Winner.configure(font=("TkDefaultFont",24))
self.Player_Two__Winner.configure(font=("TkDefaultFont",24))
self.Draw.configure(font=("TkDefaultFont",24))
# creates and places continue button
Select_Button = Button(self.Inner_Frame, image=self.Continue, command=return_value)
Select_Button.grid(row=3, column=0)
# creates and places cancel button
Cancel_Button = Button(self.Inner_Frame, image=self.Cancel, command=self.win.destroy)
Cancel_Button.grid(row=3, column=1)
# asks for winner and saves match information in tournament log file
def Report_Match(self,event='<Button-1>'):
# if there isn't a song selected, do nothing
if not Selector_Screen.report_mode:
return
# initialize the time of the match
time_of_match = str(datetime.datetime.now().time()).split(".")[0]
# ask for the winner
self.Prompt_Winner(self.master)
self.master.wait_window(self.win)
# initialize list of players
name_list = ""
# convert the time of day into 12 hour format
time_array = time_of_match.split(":")
if int(time_array[0]) > 12:
time_of_match = "%s:%s PM" % (str(int(time_array[0]) - 12), time_array[1])
elif int(time_array[0]) < 12 and int(time_array[0]) != 0:
time_of_match = "%s:%s AM" % (time_array[0], time_array[1])
elif int(time_array[0]) == 12:
time_of_match = "%s:%s PM" % (time_array[0], time_array[1])
elif int(time_array[0]) == 0:
time_of_match = "%s:%s AM" % (str(int(time_array[0]) + 12), time_array[1])
# save list of players
for names in Selector_Screen.player_array:
name_list = name_list + names + ", "
name_list = name_list[:-2]
# writes the song played, who played the song, who won the match, and when the match was played
Tournament_Log.write("Song: %s %s\n" % (Selector_Screen.current_song,Selector_Screen.current_diff))
Tournament_Log.write("Players: %s\n" % (name_list))
Tournament_Log.write("Winner: %s\n" % (Selector_Screen.match_winner))
Tournament_Log.write("Time of Match: %s\n\n" % (time_of_match))
# writes results to meta file for long-term statistics
Meta_Log.write("%s,%s,%s,%s,%s,%s,\n" % (
Selector_Screen.current_song,
Selector_Screen.diff_mode,
Selector_Screen.current_diff[1:],
tournament_name,
Selector_Screen.match_winner,
name_list
)
)
# resets song
Selector_Screen.current_song = "none"
Selector_Screen.current_diff = "none"
Selector_Screen.report_mode = False
self.Selected_Song['image'] = self.Main_Menu
# resets players
self.Player_One_Entry.delete(0,END)
self.Player_Two_Entry.delete(0,END)
Selector_Screen.player_array = ['none','none']
# refreshes entry boxes and splash screen
self.Player_One_Entry.update()
self.Player_Two_Entry.update()
self.Selected_Song.update()
# randomly selects song in range and displays on the screen
def Select_Song(self,event='<Button-1>'):
# check for required values before continuing
if self.Min_Difficulty.get() != "" and self.Min_Difficulty.get().isdigit():
Selector_Screen.min_diff = int(self.Min_Difficulty.get())
else:
return
if self.Max_Difficulty.get() != "" and self.Max_Difficulty.get().isdigit():
Selector_Screen.max_diff = int(self.Max_Difficulty.get())
else:
return
if self.Game_Mode.get() != 0:
Selector_Screen.diff_mode = song_int_dict[self.Game_Mode.get()]
else:
return
if self.Player_One_Entry.get() != "" and self.Player_Two_Entry.get() != "":
Selector_Screen.player_array = [self.Player_One_Entry.get(),self.Player_Two_Entry.get()]
#call required parameters from class variables
min_difficulty = Selector_Screen.min_diff
max_difficulty = Selector_Screen.max_diff
difficulty_mode = Selector_Screen.diff_mode
difficulty_level = randint(min_difficulty,max_difficulty)
# cycles through the difficulties to look for the correct mode and level
for difficulties in os.listdir(song_path):
# singles/doubles and #1-26
target_mode = difficulties.split("_")[0]
target_level = int(difficulties.split("_")[1])
# if there's a match, search for a song at random
if target_mode == difficulty_mode and target_level == difficulty_level:
# save path to mode_level folder
target_path = "%s/%s/" % (song_path,difficulties)
total_songs = len(os.listdir(target_path))
#choose a random song in the folder
target_song_index = randint(0,total_songs - 1)
target_song_path = sorted(glob.glob(target_path + "*.JPG"))[target_song_index]
# retrieve information from the song filename
song_information = ((os.path.basename(target_song_path)).split(".")[0]).split("_")
# convert song name from dash to space format
song_name = ""
song_name_information = song_information[0].split("-")
for words in song_name_information:
song_name = song_name + words + " "
song_name = song_name[:-1]
# retrieve song composer and bpm
song_composer = song_information[1]
s | def return_value(event='<Button-1>'):
# if button is selected
if winner.get() != 0:
# if not a draw
if winner.get() != 3:
Selector_Screen.match_winner = Selector_Screen.player_array[winner.get() - 1]
# if a draw
else:
Selector_Screen.match_winner = "Draw"
# close popup
self.win.destroy()
# creates popup window
self.win = Toplevel(parent)
self.win.wm_title("Select Winner")
| identifier_body |
list.go | VList creates and returns a pointer to a new vertical list panel
// with the specified dimensions
func NewVList(width, height float32) *List {
return newList(true, width, height)
}
// NewHList creates and returns a pointer to a new horizontal list panel
// with the specified dimensions
func NewHList(width, height float32) *List {
return newList(false, width, height)
}
// newList creates and returns a pointer to a new list panel
// with the specified orientation and dimensions
func newList(vert bool, width, height float32) *List {
li := new(List)
li.initialize(vert, width, height)
return li
}
func (li *List) initialize(vert bool, width, height float32) {
li.styles = &StyleDefault().List
li.single = true
li.ItemScroller.initialize(vert, width, height)
li.ItemScroller.SetStyles(li.styles.Scroller)
li.ItemScroller.adjustItem = true
li.ItemScroller.Subscribe(OnKeyDown, li.onKeyEvent)
li.ItemScroller.Subscribe(OnKeyRepeat, li.onKeyEvent)
if vert {
li.keyNext = window.KeyDown
li.keyPrev = window.KeyUp
} else {
li.keyNext = window.KeyRight
li.keyPrev = window.KeyLeft
}
li.update()
}
// SetSingle sets the single/multiple selection flag of the list
func (li *List) SetSingle(state bool) {
li.single = state
}
// Single returns the current state of the single/multiple selection flag
func (li *List) Single() bool {
return li.single
}
// SetStyles sets the list styles, overriding the default style
func (li *List) SetStyles(s *ListStyles) {
li.styles = s
li.ItemScroller.SetStyles(li.styles.Scroller)
li.update()
}
// Add adds a list item at the end of the list
func (li *List) Add(item IPanel) *ListItem {
return li.InsertAt(len(li.items), item)
}
// InsertAt inserts a list item at the specified position
// Returns the newly created ListItem for the inserted item
func (li *List) InsertAt(pos int, item IPanel) *ListItem {
litem := newListItem(li, item)
li.ItemScroller.InsertAt(pos, litem)
litem.Panel.Subscribe(OnMouseDown, litem.onMouse)
litem.Panel.Subscribe(OnCursorEnter, litem.onCursor)
return litem
}
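// Usage sketch (illustrative only; "item" and "other" stand for any caller-supplied
// values implementing IPanel):
//	li := NewVList(200, 300)
//	li.Add(item)          // append at the end
//	li.InsertAt(0, other) // or insert at a specific position
//	li.Subscribe(OnChange, func(evname string, ev interface{}) { /* selection changed */ })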
// RemoveAt removes the list item from the specified position
func (li *List) RemoveAt(pos int) IPanel {
// Remove the list item from the internal scroller
pan := li.ItemScroller.RemoveAt(pos)
litem := pan.(*ListItem)
// Remove item from the list item children and disposes of the list item panel
item := litem.item
litem.Remove(item)
litem.Dispose()
return item
}
// Remove removes the specified item from the list
func (li *List) Remove(item IPanel) {
for p, curr := range li.items {
if curr.(*ListItem).item == item {
li.RemoveAt(p)
return
}
}
}
// ItemAt returns the list item at the specified position
func (li *List) ItemAt(pos int) IPanel {
item := li.ItemScroller.ItemAt(pos)
if item == nil {
return nil
}
litem := item.(*ListItem)
return litem.item
}
// ItemPosition returns the position of the specified item in
// the list or -1 if not found
func (li *List) ItemPosition(item IPanel) int {
for pos := 0; pos < len(li.items); pos++ {
if li.items[pos].(*ListItem).item == item {
return pos
}
}
return -1
}
// Selected returns a list with the currently selected items
func (li *List) Selected() []IPanel {
sel := []IPanel{}
for _, item := range li.items {
litem := item.(*ListItem)
if litem.selected {
sel = append(sel, litem.item)
}
}
return sel
}
// SetSelected selects or unselects the specified item
func (li *List) SetSelected(item IPanel, state bool) {
for _, curr := range li.items {
litem := curr.(*ListItem)
if litem.item == item {
litem.SetSelected(state)
li.update()
li.Dispatch(OnChange, nil)
return
}
}
}
// SelectPos selects or unselects the item at the specified position
func (li *List) SelectPos(pos int, state bool) |
// SetItemPadLeftAt sets the additional left padding for this item
// It is used mainly by the tree control
func (li *List) SetItemPadLeftAt(pos int, pad float32) {
if pos < 0 || pos >= len(li.items) {
return
}
litem := li.items[pos].(*ListItem)
litem.padLeft = pad
litem.update()
}
// selNext selects or highlights the next item, if possible
func (li *List) selNext(sel bool, update bool) *ListItem {
// Checks for empty list
if len(li.items) == 0 {
return nil
}
// Find currently selected item
var pos int
if sel {
pos = li.selected()
} else {
pos = li.highlighted()
}
var newItem *ListItem
newSel := true
// If no item found, returns first.
if pos < 0 {
newItem = li.items[0].(*ListItem)
if sel {
newItem.SetSelected(true)
} else {
newItem.SetHighlighted(true)
}
} else {
item := li.items[pos].(*ListItem)
// Item is not the last, get next
if pos < len(li.items)-1 {
newItem = li.items[pos+1].(*ListItem)
if sel {
item.SetSelected(false)
newItem.SetSelected(true)
} else {
item.SetHighlighted(false)
newItem.SetHighlighted(true)
}
if !li.ItemVisible(pos + 1) {
li.ScrollDown()
}
// Item is the last, don't change
} else {
newItem = item
newSel = false
}
}
if update {
li.update()
}
if sel && newSel {
li.Dispatch(OnChange, nil)
}
return newItem
}
// selPrev selects or highlights the previous item, if possible
func (li *List) selPrev(sel bool, update bool) *ListItem {
// Check for empty list
if len(li.items) == 0 {
return nil
}
// Find first selected item
var pos int
if sel {
pos = li.selected()
} else {
pos = li.highlighted()
}
var newItem *ListItem
newSel := true
// If no item found, returns first.
if pos < 0 {
newItem = li.items[0].(*ListItem)
if sel {
newItem.SetSelected(true)
} else {
newItem.SetHighlighted(true)
}
} else {
item := li.items[pos].(*ListItem)
if pos == 0 {
newItem = item
newSel = false
} else {
newItem = li.items[pos-1].(*ListItem)
if sel {
item.SetSelected(false)
newItem.SetSelected(true)
} else {
item.SetHighlighted(false)
newItem.SetHighlighted(true)
}
if (pos - 1) < li.first {
li.ScrollUp()
}
}
}
if update {
li.update()
}
if sel && newSel {
li.Dispatch(OnChange, nil)
}
return newItem
}
// selected returns the position of first selected item
func (li *List) selected() (pos int) {
for pos, item := range li.items {
if item.(*ListItem).selected {
return pos
}
}
return -1
}
// highlighted returns the position of first highlighted item
func (li *List) highlighted() (pos int) {
for pos, item := range li.items {
if item.(*ListItem).highlighted {
return pos
}
}
return -1
}
// onKeyEvent receives subscribed key events for the list
func (li *List) onKeyEvent(evname string, ev interface{}) {
kev := ev.(*window.KeyEvent)
// Dropdown mode
if li.dropdown {
switch kev.Key {
case li.keyNext:
li.selNext(true, true)
case li.keyPrev:
li.selPrev(true, true)
case window.KeyEnter:
li.SetVisible(false)
default:
return
}
return
}
// Listbox mode single selection
if li.single {
switch kev.Key {
case li.keyNext:
li.selNext(true, true)
case li.keyPrev:
li.selPrev(true, true)
default:
return
}
| {
if pos < 0 || pos >= len(li.items) {
return
}
litem := li.items[pos].(*ListItem)
if litem.selected == state {
return
}
litem.SetSelected(state)
li.update()
li.Dispatch(OnChange, nil)
} | identifier_body |
list.go | (item IPanel) {
for p, curr := range li.items {
if curr.(*ListItem).item == item {
li.RemoveAt(p)
return
}
}
}
// ItemAt returns the list item at the specified position
func (li *List) ItemAt(pos int) IPanel {
item := li.ItemScroller.ItemAt(pos)
if item == nil {
return nil
}
litem := item.(*ListItem)
return litem.item
}
// ItemPosition returns the position of the specified item in
// the list or -1 if not found
func (li *List) ItemPosition(item IPanel) int {
for pos := 0; pos < len(li.items); pos++ {
if li.items[pos].(*ListItem).item == item {
return pos
}
}
return -1
}
// Selected returns a list with the currently selected items
func (li *List) Selected() []IPanel {
sel := []IPanel{}
for _, item := range li.items {
litem := item.(*ListItem)
if litem.selected {
sel = append(sel, litem.item)
}
}
return sel
}
// SetSelected selects or unselects the specified item
func (li *List) SetSelected(item IPanel, state bool) {
for _, curr := range li.items {
litem := curr.(*ListItem)
if litem.item == item {
litem.SetSelected(state)
li.update()
li.Dispatch(OnChange, nil)
return
}
}
}
// SelectPos selects or unselects the item at the specified position
func (li *List) SelectPos(pos int, state bool) {
if pos < 0 || pos >= len(li.items) {
return
}
litem := li.items[pos].(*ListItem)
if litem.selected == state {
return
}
litem.SetSelected(state)
li.update()
li.Dispatch(OnChange, nil)
}
// SetItemPadLeftAt sets the additional left padding for this item
// It is used mainly by the tree control
func (li *List) SetItemPadLeftAt(pos int, pad float32) {
if pos < 0 || pos >= len(li.items) {
return
}
litem := li.items[pos].(*ListItem)
litem.padLeft = pad
litem.update()
}
// selNext selects or highlights the next item, if possible
func (li *List) selNext(sel bool, update bool) *ListItem {
// Checks for empty list
if len(li.items) == 0 {
return nil
}
// Find currently selected item
var pos int
if sel {
pos = li.selected()
} else {
pos = li.highlighted()
}
var newItem *ListItem
newSel := true
// If no item found, returns first.
if pos < 0 {
newItem = li.items[0].(*ListItem)
if sel {
newItem.SetSelected(true)
} else {
newItem.SetHighlighted(true)
}
} else {
item := li.items[pos].(*ListItem)
// Item is not the last, get next
if pos < len(li.items)-1 {
newItem = li.items[pos+1].(*ListItem)
if sel {
item.SetSelected(false)
newItem.SetSelected(true)
} else {
item.SetHighlighted(false)
newItem.SetHighlighted(true)
}
if !li.ItemVisible(pos + 1) {
li.ScrollDown()
}
// Item is the last, don't change
} else {
newItem = item
newSel = false
}
}
if update {
li.update()
}
if sel && newSel {
li.Dispatch(OnChange, nil)
}
return newItem
}
// selPrev selects or highlights the previous item, if possible
func (li *List) selPrev(sel bool, update bool) *ListItem {
// Check for empty list
if len(li.items) == 0 {
return nil
}
// Find first selected item
var pos int
if sel {
pos = li.selected()
} else {
pos = li.highlighted()
}
var newItem *ListItem
newSel := true
// If no item found, returns first.
if pos < 0 {
newItem = li.items[0].(*ListItem)
if sel {
newItem.SetSelected(true)
} else {
newItem.SetHighlighted(true)
}
} else {
item := li.items[pos].(*ListItem)
if pos == 0 {
newItem = item
newSel = false
} else {
newItem = li.items[pos-1].(*ListItem)
if sel {
item.SetSelected(false)
newItem.SetSelected(true)
} else {
item.SetHighlighted(false)
newItem.SetHighlighted(true)
}
if (pos - 1) < li.first {
li.ScrollUp()
}
}
}
if update {
li.update()
}
if sel && newSel {
li.Dispatch(OnChange, nil)
}
return newItem
}
// selected returns the position of first selected item
func (li *List) selected() (pos int) {
for pos, item := range li.items {
if item.(*ListItem).selected {
return pos
}
}
return -1
}
// highlighted returns the position of first highlighted item
func (li *List) highlighted() (pos int) {
for pos, item := range li.items {
if item.(*ListItem).highlighted {
return pos
}
}
return -1
}
// onKeyEvent receives subscribed key events for the list
func (li *List) onKeyEvent(evname string, ev interface{}) {
kev := ev.(*window.KeyEvent)
// Dropdown mode
if li.dropdown {
switch kev.Key {
case li.keyNext:
li.selNext(true, true)
case li.keyPrev:
li.selPrev(true, true)
case window.KeyEnter:
li.SetVisible(false)
default:
return
}
return
}
// Listbox mode single selection
if li.single {
switch kev.Key {
case li.keyNext:
li.selNext(true, true)
case li.keyPrev:
li.selPrev(true, true)
default:
return
}
return
}
// Listbox mode multiple selection
switch kev.Key {
case li.keyNext:
li.selNext(false, true)
case li.keyPrev:
li.selPrev(false, true)
case window.KeySpace:
pos := li.highlighted()
if pos >= 0 {
litem := li.items[pos].(*ListItem)
li.setSelection(litem, !litem.selected, true, true)
}
default:
return
}
}
// setSelection sets the selected state of the specified item
// updating the visual appearance of the list if necessary
func (li *List) setSelection(litem *ListItem, state bool, force bool, dispatch bool) {
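// Take keyboard focus so that subsequent key events (arrow keys, space) are delivered to this list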
Manager().SetKeyFocus(li)
// If already at this state, nothing to do
if litem.selected == state && !force {
return
}
litem.SetSelected(state)
// If single selection, deselects all other items
if li.single {
for _, curr := range li.items {
if curr.(*ListItem) != litem {
curr.(*ListItem).SetSelected(false)
curr.(*ListItem).SetHighlighted(false)
}
}
}
li.update()
if dispatch {
li.Dispatch(OnChange, nil)
}
}
// update updates the visual state of the list and its items
func (li *List) update() {
// Update the list items styles
for _, item := range li.items {
item.(*ListItem).update()
}
}
//
// ListItem methods
//
func newListItem(list *List, item IPanel) *ListItem {
litem := new(ListItem)
litem.Panel.Initialize(litem, 0, 0)
litem.item = item
litem.list = list
litem.Panel.Add(item)
litem.SetContentWidth(item.GetPanel().Width())
litem.SetContentHeight(item.GetPanel().Height())
// If this list item is resized, sends event to its child panel
litem.Subscribe(OnResize, func(evname string, ev interface{}) {
item.GetPanel().Dispatch(OnListItemResize, nil)
})
litem.update()
return litem
}
// onMouse receives mouse button events over the list item
func (litem *ListItem) onMouse(evname string, ev interface{}) {
if litem.list.single {
litem.list.setSelection(litem, true, true, true)
} else {
litem.list.setSelection(litem, !litem.selected, true, true)
}
if litem.list.dropdown {
litem.list.SetVisible(false)
}
}
// onCursor receives subscribed cursor events over the list item
func (litem *ListItem) onCursor(evname string, ev interface{}) {
if litem.list.dropdown {
litem.list.setSelection(litem, true, true, false)
return
}
}
// SetSelected sets this item's selected state
func (litem *ListItem) SetSelected(state bool) {
litem.selected = state
//litem.item.SetSelected2(state)
}
// SetHighlighted sets this item's highlighted state
func (litem *ListItem) | SetHighlighted | identifier_name |
|
list.go | litem
}
// RemoveAt removes the list item from the specified position
func (li *List) RemoveAt(pos int) IPanel {
// Remove the list item from the internal scroller
pan := li.ItemScroller.RemoveAt(pos)
litem := pan.(*ListItem)
// Remove item from the list item children and disposes of the list item panel
item := litem.item
litem.Remove(item)
litem.Dispose()
return item
}
// Remove removes the specified item from the list
func (li *List) Remove(item IPanel) {
for p, curr := range li.items {
if curr.(*ListItem).item == item {
li.RemoveAt(p)
return
}
}
}
// ItemAt returns the list item at the specified position
func (li *List) ItemAt(pos int) IPanel {
item := li.ItemScroller.ItemAt(pos)
if item == nil {
return nil
}
litem := item.(*ListItem)
return litem.item
}
// ItemPosition returns the position of the specified item in
// the list or -1 if not found
func (li *List) ItemPosition(item IPanel) int {
for pos := 0; pos < len(li.items); pos++ {
if li.items[pos].(*ListItem).item == item {
return pos
}
}
return -1
}
// Selected returns a list with the currently selected items
func (li *List) Selected() []IPanel {
sel := []IPanel{}
for _, item := range li.items {
litem := item.(*ListItem)
if litem.selected {
sel = append(sel, litem.item)
}
}
return sel
}
// SetSelected selects or unselects the specified item
func (li *List) SetSelected(item IPanel, state bool) {
for _, curr := range li.items {
litem := curr.(*ListItem)
if litem.item == item {
litem.SetSelected(state)
li.update()
li.Dispatch(OnChange, nil)
return
}
}
}
// SelectPos selects or unselects the item at the specified position
func (li *List) SelectPos(pos int, state bool) {
if pos < 0 || pos >= len(li.items) {
return
}
litem := li.items[pos].(*ListItem)
if litem.selected == state {
return
}
litem.SetSelected(state)
li.update()
li.Dispatch(OnChange, nil)
}
// SetItemPadLeftAt sets the additional left padding for this item
// It is used mainly by the tree control
func (li *List) SetItemPadLeftAt(pos int, pad float32) {
if pos < 0 || pos >= len(li.items) {
return
}
litem := li.items[pos].(*ListItem)
litem.padLeft = pad
litem.update()
}
// selNext selects or highlights the next item, if possible
func (li *List) selNext(sel bool, update bool) *ListItem {
// Checks for empty list
if len(li.items) == 0 {
return nil
}
// Find currently selected item
var pos int
if sel {
pos = li.selected()
} else {
pos = li.highlighted()
}
var newItem *ListItem
newSel := true
// If no item found, returns first.
if pos < 0 {
newItem = li.items[0].(*ListItem)
if sel {
newItem.SetSelected(true)
} else {
newItem.SetHighlighted(true)
}
} else {
item := li.items[pos].(*ListItem)
// Item is not the last, get next
if pos < len(li.items)-1 {
newItem = li.items[pos+1].(*ListItem)
if sel {
item.SetSelected(false)
newItem.SetSelected(true)
} else {
item.SetHighlighted(false)
newItem.SetHighlighted(true)
}
if !li.ItemVisible(pos + 1) {
li.ScrollDown()
}
// Item is the last, don't change
} else {
newItem = item
newSel = false
}
}
if update {
li.update()
}
if sel && newSel {
li.Dispatch(OnChange, nil)
}
return newItem
}
// selPrev selects or highlights the previous item, if possible
func (li *List) selPrev(sel bool, update bool) *ListItem {
// Check for empty list
if len(li.items) == 0 {
return nil
}
// Find first selected item
var pos int
if sel {
pos = li.selected()
} else {
pos = li.highlighted()
}
var newItem *ListItem
newSel := true
// If no item found, returns first.
if pos < 0 {
newItem = li.items[0].(*ListItem)
if sel {
newItem.SetSelected(true)
} else {
newItem.SetHighlighted(true)
}
} else {
item := li.items[pos].(*ListItem)
if pos == 0 {
newItem = item
newSel = false
} else {
newItem = li.items[pos-1].(*ListItem)
if sel {
item.SetSelected(false)
newItem.SetSelected(true)
} else {
item.SetHighlighted(false)
newItem.SetHighlighted(true)
}
if (pos - 1) < li.first {
li.ScrollUp()
}
}
}
if update {
li.update()
}
if sel && newSel {
li.Dispatch(OnChange, nil)
}
return newItem
}
// selected returns the position of first selected item
func (li *List) selected() (pos int) {
for pos, item := range li.items {
if item.(*ListItem).selected {
return pos
}
}
return -1
}
// highlighted returns the position of first highlighted item
func (li *List) highlighted() (pos int) {
for pos, item := range li.items {
if item.(*ListItem).highlighted {
return pos
}
}
return -1
}
// onKeyEvent receives subscribed key events for the list
func (li *List) onKeyEvent(evname string, ev interface{}) {
kev := ev.(*window.KeyEvent)
// Dropdown mode
if li.dropdown {
switch kev.Key {
case li.keyNext:
li.selNext(true, true)
case li.keyPrev:
li.selPrev(true, true)
case window.KeyEnter:
li.SetVisible(false)
default:
return
}
return
}
// Listbox mode single selection
if li.single {
switch kev.Key {
case li.keyNext:
li.selNext(true, true)
case li.keyPrev:
li.selPrev(true, true)
default:
return
}
return
}
// Listbox mode multiple selection
switch kev.Key {
case li.keyNext:
li.selNext(false, true)
case li.keyPrev:
li.selPrev(false, true)
case window.KeySpace:
pos := li.highlighted()
if pos >= 0 {
litem := li.items[pos].(*ListItem)
li.setSelection(litem, !litem.selected, true, true)
}
default:
return
}
}
// setSelection sets the selected state of the specified item
// updating the visual appearance of the list if necessary
func (li *List) setSelection(litem *ListItem, state bool, force bool, dispatch bool) {
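// Take keyboard focus so that subsequent key events (arrow keys, space) are delivered to this list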
Manager().SetKeyFocus(li)
// If already at this state, nothing to do
if litem.selected == state && !force {
return
}
litem.SetSelected(state)
// If single selection, deselects all other items
if li.single {
for _, curr := range li.items {
if curr.(*ListItem) != litem {
curr.(*ListItem).SetSelected(false)
curr.(*ListItem).SetHighlighted(false)
}
}
}
li.update()
if dispatch {
li.Dispatch(OnChange, nil)
}
}
// update updates the visual state of the list and its items
func (li *List) update() {
// Update the list items styles
for _, item := range li.items {
item.(*ListItem).update()
}
}
//
// ListItem methods
//
func newListItem(list *List, item IPanel) *ListItem {
litem := new(ListItem)
litem.Panel.Initialize(litem, 0, 0)
litem.item = item
litem.list = list
litem.Panel.Add(item)
litem.SetContentWidth(item.GetPanel().Width())
litem.SetContentHeight(item.GetPanel().Height())
// If this list item is resized, sends event to its child panel
litem.Subscribe(OnResize, func(evname string, ev interface{}) {
item.GetPanel().Dispatch(OnListItemResize, nil)
})
litem.update()
return litem
}
// onMouse receives mouse button events over the list item
func (litem *ListItem) onMouse(evname string, ev interface{}) {
if litem.list.single {
litem.list.setSelection(litem, true, true, true)
} else {
litem.list.setSelection(litem, !litem.selected, true, true) | }
if litem.list.dropdown {
litem.list.SetVisible(false)
}
} | random_line_split |
|
list.go | *List) SetSingle(state bool) {
li.single = state
}
// Single returns the current state of the single/multiple selection flag
func (li *List) Single() bool {
return li.single
}
// SetStyles sets the list styles, overriding the default style
func (li *List) SetStyles(s *ListStyles) {
li.styles = s
li.ItemScroller.SetStyles(li.styles.Scroller)
li.update()
}
// Add adds a list item at the end of the list
func (li *List) Add(item IPanel) *ListItem {
return li.InsertAt(len(li.items), item)
}
// InsertAt inserts a list item at the specified position
// Returns the newly created ListItem for the inserted item
func (li *List) InsertAt(pos int, item IPanel) *ListItem {
litem := newListItem(li, item)
li.ItemScroller.InsertAt(pos, litem)
litem.Panel.Subscribe(OnMouseDown, litem.onMouse)
litem.Panel.Subscribe(OnCursorEnter, litem.onCursor)
return litem
}
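// Usage sketch (illustrative only; "item" and "other" stand for any caller-supplied
// values implementing IPanel):
//	li := NewVList(200, 300)
//	li.Add(item)          // append at the end
//	li.InsertAt(0, other) // or insert at a specific position
//	li.Subscribe(OnChange, func(evname string, ev interface{}) { /* selection changed */ })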
// RemoveAt removes the list item from the specified position
func (li *List) RemoveAt(pos int) IPanel {
// Remove the list item from the internal scroller
pan := li.ItemScroller.RemoveAt(pos)
litem := pan.(*ListItem)
// Remove item from the list item children and disposes of the list item panel
item := litem.item
litem.Remove(item)
litem.Dispose()
return item
}
// Remove removes the specified item from the list
func (li *List) Remove(item IPanel) {
for p, curr := range li.items {
if curr.(*ListItem).item == item {
li.RemoveAt(p)
return
}
}
}
// ItemAt returns the list item at the specified position
func (li *List) ItemAt(pos int) IPanel {
item := li.ItemScroller.ItemAt(pos)
if item == nil {
return nil
}
litem := item.(*ListItem)
return litem.item
}
// ItemPosition returns the position of the specified item in
// the list or -1 if not found
func (li *List) ItemPosition(item IPanel) int {
for pos := 0; pos < len(li.items); pos++ {
if li.items[pos].(*ListItem).item == item {
return pos
}
}
return -1
}
// Selected returns a list with the currently selected items
func (li *List) Selected() []IPanel {
sel := []IPanel{}
for _, item := range li.items {
litem := item.(*ListItem)
if litem.selected {
sel = append(sel, litem.item)
}
}
return sel
}
// SetSelected selects or unselects the specified item
func (li *List) SetSelected(item IPanel, state bool) {
for _, curr := range li.items {
litem := curr.(*ListItem)
if litem.item == item {
litem.SetSelected(state)
li.update()
li.Dispatch(OnChange, nil)
return
}
}
}
// SelectPos selects or unselects the item at the specified position
func (li *List) SelectPos(pos int, state bool) {
if pos < 0 || pos >= len(li.items) {
return
}
litem := li.items[pos].(*ListItem)
if litem.selected == state {
return
}
litem.SetSelected(state)
li.update()
li.Dispatch(OnChange, nil)
}
// SetItemPadLeftAt sets the additional left padding for this item
// It is used mainly by the tree control
func (li *List) SetItemPadLeftAt(pos int, pad float32) {
if pos < 0 || pos >= len(li.items) {
return
}
litem := li.items[pos].(*ListItem)
litem.padLeft = pad
litem.update()
}
// selNext selects or highlights the next item, if possible
func (li *List) selNext(sel bool, update bool) *ListItem {
// Checks for empty list
if len(li.items) == 0 {
return nil
}
// Find currently selected item
var pos int
if sel {
pos = li.selected()
} else {
pos = li.highlighted()
}
var newItem *ListItem
newSel := true
// If no item found, returns first.
if pos < 0 {
newItem = li.items[0].(*ListItem)
if sel {
newItem.SetSelected(true)
} else {
newItem.SetHighlighted(true)
}
} else {
item := li.items[pos].(*ListItem)
// Item is not the last, get next
if pos < len(li.items)-1 {
newItem = li.items[pos+1].(*ListItem)
if sel {
item.SetSelected(false)
newItem.SetSelected(true)
} else {
item.SetHighlighted(false)
newItem.SetHighlighted(true)
}
if !li.ItemVisible(pos + 1) {
li.ScrollDown()
}
// Item is the last, don't change
} else {
newItem = item
newSel = false
}
}
if update {
li.update()
}
if sel && newSel {
li.Dispatch(OnChange, nil)
}
return newItem
}
// selPrev selects or highlights the previous item, if possible
func (li *List) selPrev(sel bool, update bool) *ListItem {
// Check for empty list
if len(li.items) == 0 {
return nil
}
// Find first selected item
var pos int
if sel {
pos = li.selected()
} else {
pos = li.highlighted()
}
var newItem *ListItem
newSel := true
// If no item found, returns first.
if pos < 0 {
newItem = li.items[0].(*ListItem)
if sel {
newItem.SetSelected(true)
} else {
newItem.SetHighlighted(true)
}
} else {
item := li.items[pos].(*ListItem)
if pos == 0 {
newItem = item
newSel = false
} else {
newItem = li.items[pos-1].(*ListItem)
if sel {
item.SetSelected(false)
newItem.SetSelected(true)
} else {
item.SetHighlighted(false)
newItem.SetHighlighted(true)
}
if (pos - 1) < li.first {
li.ScrollUp()
}
}
}
if update {
li.update()
}
if sel && newSel {
li.Dispatch(OnChange, nil)
}
return newItem
}
// selected returns the position of first selected item
func (li *List) selected() (pos int) {
for pos, item := range li.items {
if item.(*ListItem).selected {
return pos
}
}
return -1
}
// highlighted returns the position of first highlighted item
func (li *List) highlighted() (pos int) {
for pos, item := range li.items {
if item.(*ListItem).highlighted {
return pos
}
}
return -1
}
// onKeyEvent receives subscribed key events for the list
func (li *List) onKeyEvent(evname string, ev interface{}) {
kev := ev.(*window.KeyEvent)
// Dropdown mode
if li.dropdown {
switch kev.Key {
case li.keyNext:
li.selNext(true, true)
case li.keyPrev:
li.selPrev(true, true)
case window.KeyEnter:
li.SetVisible(false)
default:
return
}
return
}
// Listbox mode single selection
if li.single {
switch kev.Key {
case li.keyNext:
li.selNext(true, true)
case li.keyPrev:
li.selPrev(true, true)
default:
return
}
return
}
// Listbox mode multiple selection
switch kev.Key {
case li.keyNext:
li.selNext(false, true)
case li.keyPrev:
li.selPrev(false, true)
case window.KeySpace:
pos := li.highlighted()
if pos >= 0 {
litem := li.items[pos].(*ListItem)
li.setSelection(litem, !litem.selected, true, true)
}
default:
return
}
}
// setSelection sets the selected state of the specified item
// updating the visual appearance of the list if necessary
func (li *List) setSelection(litem *ListItem, state bool, force bool, dispatch bool) {
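// Take keyboard focus so that subsequent key events (arrow keys, space) are delivered to this list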
Manager().SetKeyFocus(li)
// If already at this state, nothing to do
if litem.selected == state && !force {
return
}
litem.SetSelected(state)
// If single selection, deselects all other items
if li.single {
for _, curr := range li.items {
if curr.(*ListItem) != litem {
curr.(*ListItem).SetSelected(false)
curr.(*ListItem).SetHighlighted(false)
}
}
}
li.update()
if dispatch {
li.Dispatch(OnChange, nil)
}
}
// update updates the visual state of the list and its items
func (li *List) update() {
// Update the list items styles
for _, item := range li.items | {
item.(*ListItem).update()
} | conditional_block |
|
test_network_moreMeasures.py | onlyfiles_mask.sort(key = natsort_key1)
counter = list(range(len(onlyfiles_mask))) # create a counter, so can randomize it
""" Load avg_img and std_img from TRAINING SET """
mean_arr = 0; std_arr = 0;
with open('./Data_functions/mean_arr.pkl', 'rb') as f: # Python 3: open(..., 'rb')
loaded = pickle.load(f)
mean_arr = loaded[0]
with open('./Data_functions/std_arr.pkl', 'rb') as f: # Python 3: open(..., 'rb')
loaded = pickle.load(f)
std_arr = loaded[0]
batch_x = []; batch_y = [];
for i in range(len(onlyfiles_mask)):
total_counter = 0
filename = onlyfiles_mask[counter[i]]
if filename.split('.')[-1] != 'tif':
continue
filename_split = filename.split('.')[0]
""" Load image """
#size = 3788 # 4775 and 6157 for the newest one
input_arr = readIm_counter(input_path,onlyfiles_mask, counter[i])
size_whole = input_arr.size[0]
""" Resize the input to be on scale of 0.6904 um/px """
size = int(size_whole) # 4775 and 6157 for the newest one
if resize:
size = int((size * im_scale) / 0.6904) # 4775 and 6157 for the newest one
input_arr = resize_adaptive(input_arr, size, method=Image.BICUBIC)
size_whole = input_arr.size
""" DO CLAHE """
if CLAHE == 1:
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
input_arr = np.asarray(input_arr)
red = clahe.apply(np.asarray(input_arr[:,:,0], dtype=np.uint8))
input_arr.setflags(write=1)
input_arr[:,:,0] = red
DAPI = clahe.apply(np.asarray(input_arr[:,:,2], dtype=np.uint8))
input_arr[:,:,2] = DAPI
input_arr = Image.fromarray(input_arr)
""" Pre-process and identify candidate nuclei """
DAPI_size = round(radius * radius * math.pi);
DAPI_tmp, total_matched_DAPI, total_DAPI, back_subbed = pre_process(input_arr, counter[i], DAPI_size, rolling_ball, name=onlyfiles_mask[counter[i]], sav_dir=sav_dir)
if rolling_ball > 0:
plt.imsave(sav_dir + 'background_subbed' + '_' + filename_split + '_' + str(i) + '.tiff', (Image.fromarray(np.asarray(back_subbed, dtype=np.uint8))))
labelled = measure.label(DAPI_tmp)
cc = measure.regionprops(labelled)
""" Initiate list of MACHINE counted CELL OBJECTS """
num_MBP_pos = len(cc)
list_M_cells = []
for T in range(num_MBP_pos):
cell = Cell(T)
list_M_cells.append(cell)
""" start looping and cropping """
N = 0 # SKIP THE FIRST DAPI POINT b/c it's the background?
table_results = []
seg_im = np.zeros(np.shape(DAPI_tmp))
overlap_im = np.zeros(np.shape(DAPI_tmp))
DAPI_im = np.zeros(np.shape(DAPI_tmp))
if np.shape(DAPI_tmp)[0] < 1024 or np.shape(DAPI_tmp)[1] < 640:
seg_im = np.zeros([1024, 640])
overlap_im = np.zeros([1024, 640])
DAPI_im = np.zeros([1024, 640])
while N < len(cc):
DAPI_idx = cc[N]['centroid']
if rotate:
width_x = 640
# extract CROP out of everything
input_crop, coords = adapt_crop_DAPI(input_arr, DAPI_idx, length=len_x, width=width_x)
# for some reason, converting PIL to array can yield an [x,y,4] array... if so, drop the extra channel
if input_crop.shape[-1] == 4 and channels == 3:
tmp = input_crop[:, :, 0:3]
input_crop = tmp
""" Create empty image with ONLY the DAPI at the DAPI_idx """
DAPI_coords = cc[N]['coords']
tmp = np.zeros(np.shape(DAPI_tmp))
for T in range(len(DAPI_coords)):
tmp[DAPI_coords[T,0], DAPI_coords[T,1]] = 255
tmp = Image.fromarray(tmp)
DAPI_crop, coords = adapt_crop_DAPI(tmp, DAPI_idx, length=len_x, width=width_x)
"""ENSURE COORDINATES WITHIN correct size"""
size = np.shape(input_crop)
width = size[0]
length = size[1]
while True:
print('Adapt crop')
c_width = int(coords[1]) - int(coords[0])
c_length = int(coords[3]) - int(coords[2])
if c_width > width: coords[1] = coords[1] - 1
elif c_width < width:
coords[1] = coords[1] + 1
if coords[1] > width: # in case it goes out of bounds
coords[0] = coords[0] - 1
if c_length > length: coords[3] = coords[3] - 1
elif c_length < length:
coords[3] = coords[3] + 1
if coords[3] > length:
coords[2] = coords[2] - 1
if c_width == width and c_length == length:
break;
""" Delete green channel by making it the DAPI_mask instead
"""
input_crop[:, :, 1] = np.zeros([len_x,width_x])
if channels == 4:
tmp = np.zeros([len_x,width_x,channels])
tmp[:,:,0:3] = input_crop
tmp[:,:,3] = DAPI_crop
input_crop = tmp
elif channels == 3:
input_crop[:,:,1] = DAPI_crop
""" FOR ROTATING THE IMAGE OR ADDING BLACK LINES TO THE SIDES """
deg_rotated = randint(0, 360)
if rotate:
# ROTATE the input_im
width_x = 1024
np_zeros = np.zeros([len_x, width_x, 3])
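# embed the 640-px-wide crop in a 1024-px-wide canvas (centered at columns 192:832), either to rotate it or to pad it with black borders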
np_zeros[:,192:832, :] = input_crop[:, :, :]
if rand_rot:
im = Image.fromarray(np.asarray(np_zeros, dtype=np.uint8))
rotated = im.rotate(deg_rotated)
input_crop = np.asarray(rotated, dtype=np.float32)
else:
input_crop = np_zeros # delete this to do rotations
input_crop_save = np.copy(input_crop)
""" Normalize the image first """
input_crop = normalize_im(input_crop, mean_arr, std_arr)
truth_im = np.zeros([len_x, width_x, 2])
""" set inputs and truth """
batch_x.append(input_crop)
batch_y.append(truth_im)
feed_dict = {x:batch_x, y_:batch_y, training:0}
""" FEED_INPUT to NETWORK """
output = softMaxed.eval(feed_dict=feed_dict)
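# argmax over the two softmax channels gives a per-pixel class mask (0 = background, 1 = segmented) for the single crop in the batch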
classification = np.argmax(output, axis = -1)[0]
if rotate: # reverse the rotation/adding black edges
# ROTATE the input_im
if rand_rot:
im = Image.fromarray(np.asarray(classification, dtype=np.uint8))
rotated = im.rotate(-deg_rotated)
classification = np.asarray(rotated, dtype=np.float32)
| try:
tf.reset_default_graph() # necessary?
# Variable Declaration
x = tf.placeholder('float32', shape=[None, len_x, width_x, channels], name='InputImage')
y_ = tf.placeholder('float32', shape=[None, len_x, width_x, 2], name='CorrectLabel')
training = tf.placeholder(tf.bool, name='training')
""" Creates network """
y, y_b, L1, L2, L3, L4, L5, L6, L7, L8, L9, L9_conv, L10, L11, logits, softMaxed = create_network(x, y_, training)
sess = tf.InteractiveSession()
""" TO LOAD OLD CHECKPOINT """
saver = tf.train.Saver()
saver.restore(sess, s_path + 'check_' + checkpoint)
""" Pre-processing """
# Read in file names
onlyfiles_mask = [ f for f in listdir(input_path) if isfile(join(input_path,f))]
natsort_key1 = natsort_keygen(key = lambda y: y.lower()) # natural sorting order | identifier_body |
|
test_network_moreMeasures.py | ])
DAPI_im = np.zeros([1024, 640])
while N < len(cc):
DAPI_idx = cc[N]['centroid']
if rotate:
width_x = 640
# extract CROP out of everything
input_crop, coords = adapt_crop_DAPI(input_arr, DAPI_idx, length=len_x, width=width_x)
# for some reason, converting PIL to array can yield an [x,y,4] array... if so, drop the extra channel
if input_crop.shape[-1] == 4 and channels == 3:
tmp = input_crop[:, :, 0:3]
input_crop = tmp
""" Create empty image with ONLY the DAPI at the DAPI_idx """
DAPI_coords = cc[N]['coords']
tmp = np.zeros(np.shape(DAPI_tmp))
for T in range(len(DAPI_coords)):
tmp[DAPI_coords[T,0], DAPI_coords[T,1]] = 255
tmp = Image.fromarray(tmp)
DAPI_crop, coords = adapt_crop_DAPI(tmp, DAPI_idx, length=len_x, width=width_x)
"""ENSURE COORDINATES WITHIN correct size"""
size = np.shape(input_crop)
width = size[0]
length = size[1]
while True:
print('Adapt crop')
c_width = int(coords[1]) - int(coords[0])
c_length = int(coords[3]) - int(coords[2])
if c_width > width: coords[1] = coords[1] - 1
elif c_width < width:
coords[1] = coords[1] + 1
if coords[1] > width: # in case it goes out of bounds
coords[0] = coords[0] - 1
if c_length > length: coords[3] = coords[3] - 1
elif c_length < length:
coords[3] = coords[3] + 1
if coords[3] > length:
coords[2] = coords[2] - 1
if c_width == width and c_length == length:
break;
""" Delete green channel by making it the DAPI_mask instead
"""
input_crop[:, :, 1] = np.zeros([len_x,width_x])
if channels == 4:
tmp = np.zeros([len_x,width_x,channels])
tmp[:,:,0:3] = input_crop
tmp[:,:,3] = DAPI_crop
input_crop = tmp
elif channels == 3:
input_crop[:,:,1] = DAPI_crop
""" FOR ROTATING THE IMAGE OR ADDING BLACK LINES TO THE SIDES """
deg_rotated = randint(0, 360)
if rotate:
# ROTATE the input_im
width_x = 1024
np_zeros = np.zeros([len_x, width_x, 3])
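# embed the 640-px-wide crop in a 1024-px-wide canvas (centered at columns 192:832), either to rotate it or to pad it with black borders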
np_zeros[:,192:832, :] = input_crop[:, :, :]
if rand_rot:
im = Image.fromarray(np.asarray(np_zeros, dtype=np.uint8))
rotated = im.rotate(deg_rotated)
input_crop = np.asarray(rotated, dtype=np.float32)
else:
input_crop = np_zeros # delete this to do rotations
input_crop_save = np.copy(input_crop)
""" Normalize the image first """
input_crop = normalize_im(input_crop, mean_arr, std_arr)
truth_im = np.zeros([len_x, width_x, 2])
""" set inputs and truth """
batch_x.append(input_crop)
batch_y.append(truth_im)
feed_dict = {x:batch_x, y_:batch_y, training:0}
""" FEED_INPUT to NETWORK """
output = softMaxed.eval(feed_dict=feed_dict)
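# argmax over the two softmax channels gives a per-pixel class mask (0 = background, 1 = segmented) for the single crop in the batch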
classification = np.argmax(output, axis = -1)[0]
if rotate: # reverse the rotation/adding black edges
# ROTATE the input_im
if rand_rot:
im = Image.fromarray(np.asarray(classification, dtype=np.uint8))
rotated = im.rotate(-deg_rotated)
classification = np.asarray(rotated, dtype=np.float32)
width_x = 640
np_zeros = np.zeros([len_x, width_x])
np_zeros[:, :] = classification[:, 192:832]
classification = np_zeros # delete this to do rotations
""" Plot for debug """
# if debug:
# plt.figure('Out'); plt.clf; plt.subplot(224); show_norm(input_crop[:, :, 0:3]); plt.pause(0.05);
# plt.subplot(221);
# true_m = np.argmax((batch_y[0]).astype('uint8'), axis=-1); plt.imshow(true_m);
# plt.title('Truth');
# plt.subplot(222); plt.imshow(DAPI_crop); plt.title('DAPI_mask');
# plt.subplot(223); plt.imshow(classification); plt.title('Output_seg');
# plt.pause(0.05);
""" Skeletonize and count number of fibers within the output ==> saved for "sticky-seperate" later """
copy_class = np.copy(classification)
skel = skel_one(copy_class, minLengthDuring)
labelled = measure.label(skel)
cc_overlap = measure.regionprops(labelled)
for T in range(len(cc_overlap)):
length = cc_overlap[T]['MajorAxisLength']
angle = cc_overlap[T]['Orientation']
overlap_coords = cc_overlap[T]['coords']
if length > minLengthDuring and (angle > +0.785398 or angle < -0.785398):
cell_num = N
list_M_cells[cell_num].add_fiber(length)
add_coords = [int(coords[0]), int(coords[2])]
overlap_coords = overlap_coords + add_coords
list_M_cells[cell_num].add_coords(overlap_coords)
""" Plot output of individual segmentations and the input truth ==> for correcting later!!!"""
if np.count_nonzero(classification) > 0 and debug:
plt.imsave(sav_dir + filename_split + '_' + str(i) + '-cell_number-' + str(N) + '_UNet-Seg_input.tiff',
np.asarray(input_crop_save, dtype = np.uint8))
plt.imsave(sav_dir + filename_split + '_' + str(i) + '-cell_number-' + str(N) + '_UNet-Seg_truth.tiff', (classification), cmap='binary_r')
""" Create mask of all segmented cells, also save as table """
classification[classification > 0] = N + 1
cropped_seg = seg_im[int(coords[0]): int(coords[1]), int(coords[2]): int(coords[3])]
added_seg = cropped_seg + classification + DAPI_crop
seg_im[int(coords[0]): int(coords[1]), int(coords[2]): int(coords[3])] = added_seg
""" Create mask of OVERLAPPED, by just adding ones together """
classification[classification > 0] = 1
cropped_seg = overlap_im[int(coords[0]): int(coords[1]), int(coords[2]): int(coords[3])]
added_seg = cropped_seg + classification
overlap_im[int(coords[0]): int(coords[1]), int(coords[2]): int(coords[3])] = added_seg
""" Create mask of DAPI, by just adding ones together """
DAPI_crop[DAPI_crop > 0] = N + 1
cropped_seg = DAPI_im[int(coords[0]): int(coords[1]), int(coords[2]): int(coords[3])]
added_seg = cropped_seg + DAPI_crop
DAPI_im[int(coords[0]): int(coords[1]), int(coords[2]): int(coords[3])] = added_seg
batch_x = []; batch_y = []
total_counter = total_counter + 1
N = N + 1
print('Tested: %d of total: %d candidate cells for image %d of %d files' %(total_counter, len(cc), i + 1, len(onlyfiles_mask)))
""" Get mask of regions that have overlap """
binary_overlap = overlap_im > 0
labelled = measure.label(binary_overlap)
cc_overlap = measure.regionprops(labelled, intensity_image=overlap_im)
masked = np.zeros(seg_im.shape)
no_overlap = np.zeros(seg_im.shape)
for M in range(len(cc_overlap)):
overlap_val = cc_overlap[M]['MaxIntensity']
overlap_coords = cc_overlap[M]['coords']
if overlap_val > 1: # if there is overlap
for T in range(len(overlap_coords)):
masked[overlap_coords[T,0], overlap_coords[T,1]] = seg_im[overlap_coords[T,0], overlap_coords[T,1]]
else: # no overlap
| for T in range(len(overlap_coords)):
no_overlap[overlap_coords[T,0], overlap_coords[T,1]] = seg_im[overlap_coords[T,0], overlap_coords[T,1]] | conditional_block |
|
test_network_moreMeasures.py | onlyfiles_mask.sort(key = natsort_key1)
counter = list(range(len(onlyfiles_mask))) # create a counter, so can randomize it
""" Load avg_img and std_img from TRAINING SET """
mean_arr = 0; std_arr = 0;
with open('./Data_functions/mean_arr.pkl', 'rb') as f: # Python 3: open(..., 'rb')
loaded = pickle.load(f)
mean_arr = loaded[0]
with open('./Data_functions/std_arr.pkl', 'rb') as f: # Python 3: open(..., 'rb')
loaded = pickle.load(f)
std_arr = loaded[0]
batch_x = []; batch_y = [];
for i in range(len(onlyfiles_mask)):
total_counter = 0
filename = onlyfiles_mask[counter[i]]
if filename.split('.')[-1] != 'tif':
continue
filename_split = filename.split('.')[0]
""" Load image """
#size = 3788 # 4775 and 6157 for the newest one
input_arr = readIm_counter(input_path,onlyfiles_mask, counter[i])
size_whole = input_arr.size[0]
""" Resize the input to be on scale of 0.6904 um/px """
size = int(size_whole) # 4775 and 6157 for the newest one
if resize:
size = int((size * im_scale) / 0.6904) # 4775 and 6157 for the newest one
input_arr = resize_adaptive(input_arr, size, method=Image.BICUBIC)
size_whole = input_arr.size
""" DO CLAHE """
if CLAHE == 1:
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
input_arr = np.asarray(input_arr)
red = clahe.apply(np.asarray(input_arr[:,:,0], dtype=np.uint8))
input_arr.setflags(write=1)
input_arr[:,:,0] = red
DAPI = clahe.apply(np.asarray(input_arr[:,:,2], dtype=np.uint8))
input_arr[:,:,2] = DAPI
input_arr = Image.fromarray(input_arr)
""" Pre-process and identify candidate nuclei """
DAPI_size = round(radius * radius * math.pi);
DAPI_tmp, total_matched_DAPI, total_DAPI, back_subbed = pre_process(input_arr, counter[i], DAPI_size, rolling_ball, name=onlyfiles_mask[counter[i]], sav_dir=sav_dir)
if rolling_ball > 0:
plt.imsave(sav_dir + 'background_subbed' + '_' + filename_split + '_' + str(i) + '.tiff', (Image.fromarray(np.asarray(back_subbed, dtype=np.uint8))))
labelled = measure.label(DAPI_tmp)
cc = measure.regionprops(labelled)
""" Initiate list of MACHINE counted CELL OBJECTS """
num_MBP_pos = len(cc)
list_M_cells = []
for T in range(num_MBP_pos):
cell = Cell(T)
list_M_cells.append(cell)
""" start looping and cropping """
N = 0 # SKIP THE FIRST DAPI POINT b/c it's the background?
table_results = []
seg_im = np.zeros(np.shape(DAPI_tmp))
overlap_im = np.zeros(np.shape(DAPI_tmp))
DAPI_im = np.zeros(np.shape(DAPI_tmp))
if np.shape(DAPI_tmp)[0] < 1024 or np.shape(DAPI_tmp)[1] < 640:
seg_im = np.zeros([1024, 640])
overlap_im = np.zeros([1024, 640])
DAPI_im = np.zeros([1024, 640])
while N < len(cc):
DAPI_idx = cc[N]['centroid']
if rotate:
width_x = 640
# extract CROP out of everything
input_crop, coords = adapt_crop_DAPI(input_arr, DAPI_idx, length=len_x, width=width_x)
# for some reason, converting PIL to array can yield an [x,y,4] array... if so, drop the extra channel
if input_crop.shape[-1] == 4 and channels == 3:
tmp = input_crop[:, :, 0:3]
input_crop = tmp
""" Create empty image with ONLY the DAPI at the DAPI_idx """
DAPI_coords = cc[N]['coords']
tmp = np.zeros(np.shape(DAPI_tmp))
for T in range(len(DAPI_coords)):
tmp[DAPI_coords[T,0], DAPI_coords[T,1]] = 255
tmp = Image.fromarray(tmp)
DAPI_crop, coords = adapt_crop_DAPI(tmp, DAPI_idx, length=len_x, width=width_x)
"""ENSURE COORDINATES WITHIN correct size"""
size = np.shape(input_crop)
width = size[0]
length = size[1]
while True:
print('Adapt crop')
c_width = int(coords[1]) - int(coords[0])
c_length = int(coords[3]) - int(coords[2])
if c_width > width: coords[1] = coords[1] - 1
elif c_width < width:
coords[1] = coords[1] + 1
if coords[1] > width: # in case it goes out of bounds
coords[0] = coords[0] - 1
if c_length > length: coords[3] = coords[3] - 1
elif c_length < length:
coords[3] = coords[3] + 1
if coords[3] > length:
coords[2] = coords[2] - 1
if c_width == width and c_length == length:
break;
""" Delete green channel by making it the DAPI_mask instead
"""
input_crop[:, :, 1] = np.zeros([len_x,width_x])
if channels == 4:
tmp = np.zeros([len_x,width_x,channels])
tmp[:,:,0:3] = input_crop
tmp[:,:,3] = DAPI_crop
input_crop = tmp
elif channels == 3:
input_crop[:,:,1] = DAPI_crop
""" FOR ROTATING THE IMAGE OR ADDING BLACK LINES TO THE SIDES """
deg_rotated = randint(0, 360)
if rotate:
# ROTATE the input_im
width_x = 1024
np_zeros = np.zeros([len_x, width_x, 3])
np_zeros[:,192:832, :] = input_crop[:, :, :]
if rand_rot:
im = Image.fromarray(np.asarray(np_zeros, dtype=np.uint8))
rotated = im.rotate(deg_rotated)
input_crop = np.asarray(rotated, dtype=np.float32)
else:
input_crop = np_zeros # delete this to do rotations
input_crop_save = np.copy(input_crop)
""" Normalize the image first """
input_crop = normalize_im(input_crop, mean_arr, std_arr)
truth_im = np.zeros([len_x, width_x, 2])
""" set inputs and truth """
batch_x.append(input_crop)
batch_y.append(truth_im)
feed_dict = {x:batch_x, y_:batch_y, training:0}
""" FEED_INPUT to NETWORK """
output = softMaxed.eval(feed_dict=feed_dict)
classification = np.argmax(output, axis = -1)[0]
if rotate: # reverse the rotation/adding black edges
# ROTATE the input_im
if rand_rot:
im = Image.fromarray(np.asarray(classification, dtype=np.uint8))
rotated = im.rotate(-deg_rotated)
classification = np.asarray(rotated, dtype=np.float32)
width_x = 640
np_zeros = np.zeros([len_x, width_x])
np_zeros[:, :] = classification[:, 192:832]
classification = np_zeros # delete this to do rotations
""" Plot for debug """
# if debug:
# plt.figure('Out'); plt.clf; plt.subplot(224); show_norm(input_crop[:, :, 0:3]); plt.pause(0.05);
# plt.subplot(221);
# true_m = np.argmax((batch_y[0]).astype('uint8'), axis=-1); plt.imshow(true_m);
# plt.title('Truth');
# plt.subplot(222); plt.imshow(DAPI_crop); plt.title('DAPI_mask');
# plt.subplot(223); plt.imshow(classification); plt.title('Output_seg');
# plt | onlyfiles_mask = [ f for f in listdir(input_path) if isfile(join(input_path,f))]
natsort_key1 = natsort_keygen(key = lambda y: y.lower()) # natural sorting order | random_line_split |
|
test_network_moreMeasures.py | (s_path, sav_dir, input_path, checkpoint,
im_scale, minLength, minSingle, minLengthDuring, radius,
len_x, width_x, channels, CLAHE, rotate, jacc_test, rand_rot, rolling_ball, resize,
debug):
try:
tf.reset_default_graph() # necessary?
# Variable Declaration
x = tf.placeholder('float32', shape=[None, len_x, width_x, channels], name='InputImage')
y_ = tf.placeholder('float32', shape=[None, len_x, width_x, 2], name='CorrectLabel')
training = tf.placeholder(tf.bool, name='training')
""" Creates network """
y, y_b, L1, L2, L3, L4, L5, L6, L7, L8, L9, L9_conv, L10, L11, logits, softMaxed = create_network(x, y_, training)
sess = tf.InteractiveSession()
""" TO LOAD OLD CHECKPOINT """
saver = tf.train.Saver()
saver.restore(sess, s_path + 'check_' + checkpoint)
""" Pre-processing """
# Read in file names
onlyfiles_mask = [ f for f in listdir(input_path) if isfile(join(input_path,f))]
natsort_key1 = natsort_keygen(key = lambda y: y.lower()) # natural sorting order
onlyfiles_mask.sort(key = natsort_key1)
counter = list(range(len(onlyfiles_mask))) # create a counter, so can randomize it
""" Load avg_img and std_img from TRAINING SET """
mean_arr = 0; std_arr = 0;
with open('./Data_functions/mean_arr.pkl', 'rb') as f: # Python 3: open(..., 'rb')
loaded = pickle.load(f)
mean_arr = loaded[0]
with open('./Data_functions/std_arr.pkl', 'rb') as f: # Python 3: open(..., 'rb')
loaded = pickle.load(f)
std_arr = loaded[0]
batch_x = []; batch_y = [];
for i in range(len(onlyfiles_mask)):
total_counter = 0
filename = onlyfiles_mask[counter[i]]
if filename.split('.')[-1] != 'tif':
continue
filename_split = filename.split('.')[0]
""" Load image """
#size = 3788 # 4775 and 6157 for the newest one
input_arr = readIm_counter(input_path,onlyfiles_mask, counter[i])
size_whole = input_arr.size[0]
""" Resize the input to be on scale of 0.6904 um/px """
size = int(size_whole) # 4775 and 6157 for the newest one
if resize:
size = int((size * im_scale) / 0.6904) # 4775 and 6157 for the newest one
input_arr = resize_adaptive(input_arr, size, method=Image.BICUBIC)
size_whole = input_arr.size
""" DO CLAHE """
if CLAHE == 1:
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
input_arr = np.asarray(input_arr)
red = clahe.apply(np.asarray(input_arr[:,:,0], dtype=np.uint8))
input_arr.setflags(write=1)
input_arr[:,:,0] = red
DAPI = clahe.apply(np.asarray(input_arr[:,:,2], dtype=np.uint8))
input_arr[:,:,2] = DAPI
input_arr = Image.fromarray(input_arr)
""" Pre-process and identify candidate nuclei """
DAPI_size = round(radius * radius * math.pi);
DAPI_tmp, total_matched_DAPI, total_DAPI, back_subbed = pre_process(input_arr, counter[i], DAPI_size, rolling_ball, name=onlyfiles_mask[counter[i]], sav_dir=sav_dir)
if rolling_ball > 0:
plt.imsave(sav_dir + 'background_subbed' + '_' + filename_split + '_' + str(i) + '.tiff', (Image.fromarray(np.asarray(back_subbed, dtype=np.uint8))))
labelled = measure.label(DAPI_tmp)
cc = measure.regionprops(labelled)
""" Initiate list of MACHINE counted CELL OBJECTS """
num_MBP_pos = len(cc)
list_M_cells = []
for T in range(num_MBP_pos):
cell = Cell(T)
list_M_cells.append(cell)
""" start looping and cropping """
N = 0 # SKIP THE FIRST DAPI POINT b/c it's the background?
table_results = []
seg_im = np.zeros(np.shape(DAPI_tmp))
overlap_im = np.zeros(np.shape(DAPI_tmp))
DAPI_im = np.zeros(np.shape(DAPI_tmp))
if np.shape(DAPI_tmp)[0] < 1024 or np.shape(DAPI_tmp)[1] < 640:
seg_im = np.zeros([1024, 640])
overlap_im = np.zeros([1024, 640])
DAPI_im = np.zeros([1024, 640])
while N < len(cc):
DAPI_idx = cc[N]['centroid']
if rotate:
width_x = 640
# extract CROP out of everything
input_crop, coords = adapt_crop_DAPI(input_arr, DAPI_idx, length=len_x, width=width_x)
# for some reason converting PIL to array results in a [x,y,4] array... if so, remove the last matrix
if input_crop.shape[-1] == 4 and channels == 3:
tmp = input_crop[:, :, 0:3]
input_crop = tmp
""" Create empty image with ONLY the DAPI at the DAPI_idx """
DAPI_coords = cc[N]['coords']
tmp = np.zeros(np.shape(DAPI_tmp))
for T in range(len(DAPI_coords)):
tmp[DAPI_coords[T,0], DAPI_coords[T,1]] = 255
tmp = Image.fromarray(tmp)
DAPI_crop, coords = adapt_crop_DAPI(tmp, DAPI_idx, length=len_x, width=width_x)
"""ENSURE COORDINATES WITHIN correct size"""
size = np.shape(input_crop)
width = size[0]
length = size[1]
while True:
print('Adapt crop')
c_width = int(coords[1]) - int(coords[0])
c_length = int(coords[3]) - int(coords[2])
if c_width > width: coords[1] = coords[1] - 1
elif c_width < width:
coords[1] = coords[1] + 1
if coords[1] > width: # in case it goes out of bounds
coords[0] = coords[0] - 1
if c_length > length: coords[3] = coords[3] - 1
elif c_length < length:
coords[3] = coords[3] + 1
if coords[3] > length:
coords[2] = coords[2] - 1
if c_width == width and c_length == length:
break;
""" Delete green channel by making it the DAPI_mask instead
"""
input_crop[:, :, 1] = np.zeros([len_x,width_x])
if channels == 4:
tmp = np.zeros([len_x,width_x,channels])
tmp[:,:,0:3] = input_crop
tmp[:,:,3] = DAPI_crop
input_crop = tmp
elif channels == 3:
input_crop[:,:,1] = DAPI_crop
""" FOR ROTATING THE IMAGE OR ADDING BLACK LINES TO THE SIDES """
deg_rotated = randint(0, 360)
if rotate:
# ROTATE the input_im
width_x = 1024
np_zeros = np.zeros([len_x, width_x, 3])
np_zeros[:,192:832, :] = input_crop[:, :, :]
if rand_rot:
im = Image.fromarray(np.asarray(np_zeros, dtype=np.uint8))
rotated = im.rotate(deg_rotated)
input_crop = np.asarray(rotated, dtype=np.float32)
else:
input_crop = np_zeros # delete this to do rotations
input_crop_save = np.copy(input_crop)
""" Normalize the image first """
input_crop = normalize_im(input_crop, mean_arr, std_arr)
truth_im = np.zeros([len_x, width_x, 2])
""" set inputs and truth """
batch_x.append(input_crop)
batch_y.append(truth_im)
feed_dict = {x:batch_x, y_:batch_y, training:0}
""" FEED_INPUT to NETWORK """
output = softMaxed.eval(feed_dict=feed_dict)
classification = np.argmax(output, axis = -1)[0]
if rotate: # reverse the rotation/adding | run_analysis | identifier_name |
|
filter_jpeg.js | = options.maxEntrySize;
this._onIFDEntry = options.onIFDEntry;
// internal data
this._markerCode = 0;
this._bytesLeft = 0;
this._segmentLength = 0;
this._app1buffer = null;
this._app1pos = 0;
this._bytesRead = 0;
//
this._BufferConstructor = null;
this._bufferUseAlloc = false;
this._bufferUseFrom = false;
}
function toHex(number) |
// Perform a shallow copy of a buffer or typed array
//
function slice(buf, start, end) {
if (buf.slice && buf.copy && buf.writeDoubleBE) {
//
// Looks like node.js buffer
//
// - we use buf.slice() in node.js buffers because
// buf.subarray() is not a buffer
//
// - we use buf.subarray() in uint8arrays because
// buf.slice() is not a shallow copy
//
return buf.slice(start, end);
}
return buf.subarray(start, end);
}
// Copy one buffer to another
//
function copy(src, dst, dst_offset) {
if (src.length + dst_offset > dst.length) throw new Error('buffer is too small');
if (src.copy) {
src.copy(dst, dst_offset);
} else {
dst.set(src, dst_offset);
}
}
JpegFilter.prototype._error = function (message, code) {
// double error?
if (this._state === FINAL) return;
let err = new Error(message);
err.code = code;
this._state = FINAL;
this.onError(err);
};
// Detect required output type by first input chunk
JpegFilter.prototype._detectBuffer = function (data) {
if (this._BufferConstructor) return;
this._BufferConstructor = data.constructor;
this._bufferUseAlloc = typeof data.constructor.alloc === 'function';
this._bufferUseFrom = typeof data.constructor.from === 'function';
};
// Helper to allocate output with proper class type (Uint8Array|Buffer)
// All this magic is required only to make code work in browser too.
JpegFilter.prototype._buffer = function (arg) {
let cls = this._BufferConstructor;
/* eslint-disable new-cap */
if (typeof arg === 'number') {
return this._bufferUseAlloc ? cls.alloc(arg) : new cls(arg);
}
return this._bufferUseFrom ? cls.from(arg) : new cls(arg);
};
/* eslint-disable max-depth */
JpegFilter.prototype.push = function (data) {
// guess output data type by first input chunk
this._detectBuffer(data);
var buf, di, i = 0;
while (i < data.length) {
let b = data[i];
switch (this._state) {
// start of the file, read signature (FF)
case FILE_START:
if (b !== 0xFF) {
this._error('unknown file format', 'ENOTJPEG', i);
return;
}
this._state = FILE_START_FF;
i++;
break;
// start of the file, read signature (D8)
case FILE_START_FF:
if (b !== 0xD8) {
this._error('unknown file format', 'ENOTJPEG', i);
return;
}
this.onData(this._buffer([ 0xFF, 0xD8 ]));
this._state = SEGMENT_START;
i++;
break;
// start of a segment, expect to read FF
case SEGMENT_START:
if (this._markerCode === 0xDA) {
// previous segment was SOS, so we should read image data instead
this._state = IMAGE;
break;
}
if (b !== 0xFF) {
this._error('unexpected byte at segment start: ' + toHex(b) +
' (offset ' + toHex(this._bytesRead + i) + ')',
'EBADDATA');
return;
}
this._state = SEGMENT_MARKER;
i++;
break;
// read marker ID
case SEGMENT_MARKER:
// standalone markers, according to JPEG 1992,
// http://www.w3.org/Graphics/JPEG/itu-t81.pdf, see Table B.1
if ((0xD0 <= b && b <= 0xD9) || b === 0x01) {
this._markerCode = b;
this._bytesLeft = 0;
this._segmentLength = 0;
if (this._markerCode === 0xD9 /* EOI */) {
this.onData(this._buffer([ 0xFF, 0xD9 ]));
this._state = FINAL;
this.onEnd();
} else {
this._state = SEGMENT_LENGTH;
}
i++;
break;
}
// the rest of the unreserved markers
if (0xC0 <= b && b <= 0xFE) {
this._markerCode = b;
this._bytesLeft = 2;
this._segmentLength = 0;
this._state = SEGMENT_LENGTH;
i++;
break;
}
if (b === 0xFF) {
// padding byte, skip it
i++;
break;
}
// unknown markers
this._error('unknown marker: ' + toHex(b) +
' (offset ' + toHex(this._bytesRead + i) + ')',
'EBADDATA');
return; // return after error, not break
// read segment length (2 bytes total)
case SEGMENT_LENGTH:
while (this._bytesLeft > 0 && i < data.length) {
this._segmentLength = this._segmentLength * 0x100 + data[i];
this._bytesLeft--;
i++;
}
if (this._bytesLeft <= 0) {
if (this._comment !== null && typeof this._comment !== 'undefined' && this._markerCode !== 0xE0) {
// insert comment field before any other markers (except APP0)
//
// (we can insert it anywhere, but JFIF segment being first
// looks nicer in hexdump)
//
let enc;
try {
// poor man's utf8 encoding
enc = unescape(encodeURIComponent(this._comment));
} catch (err) {
enc = this._comment;
}
buf = this._buffer(5 + enc.length);
buf[0] = 0xFF;
buf[1] = 0xFE;
buf[2] = ((enc.length + 3) >>> 8) & 0xFF;
buf[3] = (enc.length + 3) & 0xFF;
/* eslint-disable no-loop-func */
enc.split('').forEach((c, pos) => {
buf[pos + 4] = c.charCodeAt(0) & 0xFF;
});
buf[buf.length - 1] = 0;
this._comment = null;
this.onData(buf);
}
if (this._markerCode === 0xE0) {
// APP0, 14-byte JFIF header
this._state = SEGMENT_PIPE;
} else if (this._markerCode === 0xE1) {
// APP1, Exif candidate
this._state = this._filter && this._removeExif ?
SEGMENT_IGNORE : // ignore if we remove both
SEGMENT_BUFFER;
} else if (this._markerCode === 0xE2) {
// APP2, ICC_profile
this._state = this._removeICC ?
SEGMENT_IGNORE :
SEGMENT_PIPE;
} else if (this._markerCode > 0xE2 && this._markerCode < 0xF0) {
// Photoshop metadata, etc.
this._state = this._filter ?
SEGMENT_IGNORE :
SEGMENT_PIPE;
} else if (this._markerCode === 0xFE) {
// Comments
this._state = this._removeComments ?
SEGMENT_IGNORE :
SEGMENT_PIPE;
} else {
// other valid headers
this._state = this._removeImage ?
SEGMENT_IGNORE :
SEGMENT_PIPE;
}
this._bytesLeft = Math.max(this._segmentLength - 2, 0);
}
break;
// read segment and ignore it
case SEGMENT_IGNORE:
di = Math.min(this._bytesLeft, data.length - i);
i += di;
this._bytesLeft -= di;
if (this._bytesLeft <= 0) this._state = SEGMENT_START;
break;
// read segment and pass it into output
case SEGMENT_PIPE:
if (this._bytesLeft <= 0) {
this._state = SEGMENT_START;
} else {
this._state = SEGMENT_PIPE_DATA;
}
buf = this._buffer(4);
buf[0] = 0xFF;
buf[1] = this._markerCode;
buf | {
let n = number.toString(16).toUpperCase();
for (let i = 2 - n.length; i > 0; i--) n = '0' + n;
return '0x' + n;
} | identifier_body |
filter_jpeg.js | = options.maxEntrySize;
this._onIFDEntry = options.onIFDEntry;
// internal data
this._markerCode = 0;
this._bytesLeft = 0;
this._segmentLength = 0;
this._app1buffer = null;
this._app1pos = 0;
this._bytesRead = 0;
//
this._BufferConstructor = null;
this._bufferUseAlloc = false;
this._bufferUseFrom = false;
}
function | (number) {
let n = number.toString(16).toUpperCase();
for (let i = 2 - n.length; i > 0; i--) n = '0' + n;
return '0x' + n;
}
// Perform a shallow copy of a buffer or typed array
//
function slice(buf, start, end) {
if (buf.slice && buf.copy && buf.writeDoubleBE) {
//
// Looks like node.js buffer
//
// - we use buf.slice() in node.js buffers because
// buf.subarray() is not a buffer
//
// - we use buf.subarray() in uint8arrays because
// buf.slice() is not a shallow copy
//
return buf.slice(start, end);
}
return buf.subarray(start, end);
}
// Copy one buffer to another
//
function copy(src, dst, dst_offset) {
if (src.length + dst_offset > dst.length) throw new Error('buffer is too small');
if (src.copy) {
src.copy(dst, dst_offset);
} else {
dst.set(src, dst_offset);
}
}
JpegFilter.prototype._error = function (message, code) {
// double error?
if (this._state === FINAL) return;
let err = new Error(message);
err.code = code;
this._state = FINAL;
this.onError(err);
};
// Detect required output type by first input chunk
JpegFilter.prototype._detectBuffer = function (data) {
if (this._BufferConstructor) return;
this._BufferConstructor = data.constructor;
this._bufferUseAlloc = typeof data.constructor.alloc === 'function';
this._bufferUseFrom = typeof data.constructor.from === 'function';
};
// Helper to allocate output with proper class type (Uint8Array|Buffer)
// All this magic is required only to make code work in browser too.
JpegFilter.prototype._buffer = function (arg) {
let cls = this._BufferConstructor;
/* eslint-disable new-cap */
if (typeof arg === 'number') {
return this._bufferUseAlloc ? cls.alloc(arg) : new cls(arg);
}
return this._bufferUseFrom ? cls.from(arg) : new cls(arg);
};
/* eslint-disable max-depth */
JpegFilter.prototype.push = function (data) {
// guess output data type by first input chunk
this._detectBuffer(data);
var buf, di, i = 0;
while (i < data.length) {
let b = data[i];
switch (this._state) {
// start of the file, read signature (FF)
case FILE_START:
if (b !== 0xFF) {
this._error('unknown file format', 'ENOTJPEG', i);
return;
}
this._state = FILE_START_FF;
i++;
break;
// start of the file, read signature (D8)
case FILE_START_FF:
if (b !== 0xD8) {
this._error('unknown file format', 'ENOTJPEG', i);
return;
}
this.onData(this._buffer([ 0xFF, 0xD8 ]));
this._state = SEGMENT_START;
i++;
break;
// start of a segment, expect to read FF
case SEGMENT_START:
if (this._markerCode === 0xDA) {
// previous segment was SOS, so we should read image data instead
this._state = IMAGE;
break;
}
if (b !== 0xFF) {
this._error('unexpected byte at segment start: ' + toHex(b) +
' (offset ' + toHex(this._bytesRead + i) + ')',
'EBADDATA');
return;
}
this._state = SEGMENT_MARKER;
i++;
break;
// read marker ID
case SEGMENT_MARKER:
// standalone markers, according to JPEG 1992,
// http://www.w3.org/Graphics/JPEG/itu-t81.pdf, see Table B.1
if ((0xD0 <= b && b <= 0xD9) || b === 0x01) {
this._markerCode = b;
this._bytesLeft = 0;
this._segmentLength = 0;
if (this._markerCode === 0xD9 /* EOI */) {
this.onData(this._buffer([ 0xFF, 0xD9 ]));
this._state = FINAL;
this.onEnd();
} else {
this._state = SEGMENT_LENGTH;
}
i++;
break;
}
// the rest of the unreserved markers
if (0xC0 <= b && b <= 0xFE) {
this._markerCode = b;
this._bytesLeft = 2;
this._segmentLength = 0;
this._state = SEGMENT_LENGTH;
i++;
break;
}
if (b === 0xFF) {
// padding byte, skip it
i++;
break;
}
// unknown markers
this._error('unknown marker: ' + toHex(b) +
' (offset ' + toHex(this._bytesRead + i) + ')',
'EBADDATA');
return; // return after error, not break
// read segment length (2 bytes total)
case SEGMENT_LENGTH:
while (this._bytesLeft > 0 && i < data.length) {
this._segmentLength = this._segmentLength * 0x100 + data[i];
this._bytesLeft--;
i++;
}
if (this._bytesLeft <= 0) {
if (this._comment !== null && typeof this._comment !== 'undefined' && this._markerCode !== 0xE0) {
// insert comment field before any other markers (except APP0)
//
// (we can insert it anywhere, but JFIF segment being first
// looks nicer in hexdump)
//
let enc;
try {
// poor man's utf8 encoding
enc = unescape(encodeURIComponent(this._comment));
} catch (err) {
enc = this._comment;
}
buf = this._buffer(5 + enc.length);
buf[0] = 0xFF;
buf[1] = 0xFE;
buf[2] = ((enc.length + 3) >>> 8) & 0xFF;
buf[3] = (enc.length + 3) & 0xFF;
/* eslint-disable no-loop-func */
enc.split('').forEach((c, pos) => {
buf[pos + 4] = c.charCodeAt(0) & 0xFF;
});
buf[buf.length - 1] = 0;
this._comment = null;
this.onData(buf);
}
if (this._markerCode === 0xE0) {
// APP0, 14-byte JFIF header
this._state = SEGMENT_PIPE;
} else if (this._markerCode === 0xE1) {
// APP1, Exif candidate
this._state = this._filter && this._removeExif ?
SEGMENT_IGNORE : // ignore if we remove both
SEGMENT_BUFFER;
} else if (this._markerCode === 0xE2) {
// APP2, ICC_profile
this._state = this._removeICC ?
SEGMENT_IGNORE :
SEGMENT_PIPE;
} else if (this._markerCode > 0xE2 && this._markerCode < 0xF0) {
// Photoshop metadata, etc.
this._state = this._filter ?
SEGMENT_IGNORE :
SEGMENT_PIPE;
} else if (this._markerCode === 0xFE) {
// Comments
this._state = this._removeComments ?
SEGMENT_IGNORE :
SEGMENT_PIPE;
} else {
// other valid headers
this._state = this._removeImage ?
SEGMENT_IGNORE :
SEGMENT_PIPE;
}
this._bytesLeft = Math.max(this._segmentLength - 2, 0);
}
break;
// read segment and ignore it
case SEGMENT_IGNORE:
di = Math.min(this._bytesLeft, data.length - i);
i += di;
this._bytesLeft -= di;
if (this._bytesLeft <= 0) this._state = SEGMENT_START;
break;
// read segment and pass it into output
case SEGMENT_PIPE:
if (this._bytesLeft <= 0) {
this._state = SEGMENT_START;
} else {
this._state = SEGMENT_PIPE_DATA;
}
buf = this._buffer(4);
buf[0] = 0xFF;
buf[1] = this._markerCode;
buf[ | toHex | identifier_name |
filter_jpeg.js | JpegFilter.prototype._error = function (message, code) {
// double error?
if (this._state === FINAL) return;
let err = new Error(message);
err.code = code;
this._state = FINAL;
this.onError(err);
};
// Detect required output type by first input chunk
JpegFilter.prototype._detectBuffer = function (data) {
if (this._BufferConstructor) return;
this._BufferConstructor = data.constructor;
this._bufferUseAlloc = typeof data.constructor.alloc === 'function';
this._bufferUseFrom = typeof data.constructor.from === 'function';
};
// Helper to allocate output with proper class type (Uint8Array|Buffer)
// All this magic is required only to make code work in browser too.
JpegFilter.prototype._buffer = function (arg) {
let cls = this._BufferConstructor;
/* eslint-disable new-cap */
if (typeof arg === 'number') {
return this._bufferUseAlloc ? cls.alloc(arg) : new cls(arg);
}
return this._bufferUseFrom ? cls.from(arg) : new cls(arg);
};
/* eslint-disable max-depth */
JpegFilter.prototype.push = function (data) {
// guess output data type by first input chunk
this._detectBuffer(data);
var buf, di, i = 0;
while (i < data.length) {
let b = data[i];
switch (this._state) {
// start of the file, read signature (FF)
case FILE_START:
if (b !== 0xFF) {
this._error('unknown file format', 'ENOTJPEG', i);
return;
}
this._state = FILE_START_FF;
i++;
break;
// start of the file, read signature (D8)
case FILE_START_FF:
if (b !== 0xD8) {
this._error('unknown file format', 'ENOTJPEG', i);
return;
}
this.onData(this._buffer([ 0xFF, 0xD8 ]));
this._state = SEGMENT_START;
i++;
break;
// start of a segment, expect to read FF
case SEGMENT_START:
if (this._markerCode === 0xDA) {
// previous segment was SOS, so we should read image data instead
this._state = IMAGE;
break;
}
if (b !== 0xFF) {
this._error('unexpected byte at segment start: ' + toHex(b) +
' (offset ' + toHex(this._bytesRead + i) + ')',
'EBADDATA');
return;
}
this._state = SEGMENT_MARKER;
i++;
break;
// read marker ID
case SEGMENT_MARKER:
// standalone markers, according to JPEG 1992,
// http://www.w3.org/Graphics/JPEG/itu-t81.pdf, see Table B.1
if ((0xD0 <= b && b <= 0xD9) || b === 0x01) {
this._markerCode = b;
this._bytesLeft = 0;
this._segmentLength = 0;
if (this._markerCode === 0xD9 /* EOI */) {
this.onData(this._buffer([ 0xFF, 0xD9 ]));
this._state = FINAL;
this.onEnd();
} else {
this._state = SEGMENT_LENGTH;
}
i++;
break;
}
// the rest of the unreserved markers
if (0xC0 <= b && b <= 0xFE) {
this._markerCode = b;
this._bytesLeft = 2;
this._segmentLength = 0;
this._state = SEGMENT_LENGTH;
i++;
break;
}
if (b === 0xFF) {
// padding byte, skip it
i++;
break;
}
// unknown markers
this._error('unknown marker: ' + toHex(b) +
' (offset ' + toHex(this._bytesRead + i) + ')',
'EBADDATA');
return; // return after error, not break
// read segment length (2 bytes total)
case SEGMENT_LENGTH:
while (this._bytesLeft > 0 && i < data.length) {
this._segmentLength = this._segmentLength * 0x100 + data[i];
this._bytesLeft--;
i++;
}
if (this._bytesLeft <= 0) {
if (this._comment !== null && typeof this._comment !== 'undefined' && this._markerCode !== 0xE0) {
// insert comment field before any other markers (except APP0)
//
// (we can insert it anywhere, but JFIF segment being first
// looks nicer in hexdump)
//
let enc;
try {
// poor man's utf8 encoding
enc = unescape(encodeURIComponent(this._comment));
} catch (err) {
enc = this._comment;
}
buf = this._buffer(5 + enc.length);
buf[0] = 0xFF;
buf[1] = 0xFE;
buf[2] = ((enc.length + 3) >>> 8) & 0xFF;
buf[3] = (enc.length + 3) & 0xFF;
/* eslint-disable no-loop-func */
enc.split('').forEach((c, pos) => {
buf[pos + 4] = c.charCodeAt(0) & 0xFF;
});
buf[buf.length - 1] = 0;
this._comment = null;
this.onData(buf);
}
if (this._markerCode === 0xE0) {
// APP0, 14-byte JFIF header
this._state = SEGMENT_PIPE;
} else if (this._markerCode === 0xE1) {
// APP1, Exif candidate
this._state = this._filter && this._removeExif ?
SEGMENT_IGNORE : // ignore if we remove both
SEGMENT_BUFFER;
} else if (this._markerCode === 0xE2) {
// APP2, ICC_profile
this._state = this._removeICC ?
SEGMENT_IGNORE :
SEGMENT_PIPE;
} else if (this._markerCode > 0xE2 && this._markerCode < 0xF0) {
// Photoshop metadata, etc.
this._state = this._filter ?
SEGMENT_IGNORE :
SEGMENT_PIPE;
} else if (this._markerCode === 0xFE) {
// Comments
this._state = this._removeComments ?
SEGMENT_IGNORE :
SEGMENT_PIPE;
} else {
// other valid headers
this._state = this._removeImage ?
SEGMENT_IGNORE :
SEGMENT_PIPE;
}
this._bytesLeft = Math.max(this._segmentLength - 2, 0);
}
break;
// read segment and ignore it
case SEGMENT_IGNORE:
di = Math.min(this._bytesLeft, data.length - i);
i += di;
this._bytesLeft -= di;
if (this._bytesLeft <= 0) this._state = SEGMENT_START;
break;
// read segment and pass it into output
case SEGMENT_PIPE:
if (this._bytesLeft <= 0) {
this._state = SEGMENT_START;
} else {
this._state = SEGMENT_PIPE_DATA;
}
buf = this._buffer(4);
buf[0] = 0xFF;
buf[1] = this._markerCode;
buf[2] = ((this._bytesLeft + 2) >>> 8) & 0xFF;
buf[3] = (this._bytesLeft + 2) & 0xFF;
this.onData(buf);
break;
// read segment and pass it into output
case SEGMENT_PIPE_DATA:
di = Math.min(this._bytesLeft, data.length - i);
this.onData(slice(data, i, i + di));
i += di;
this._bytesLeft -= di;
if (this._bytesLeft <= 0) this._state = SEGMENT_START;
break;
// read segment and buffer it, process as exif
case SEGMENT_BUFFER:
this._app1buffer = this._buffer(this._bytesLeft);
this._app1pos = 0;
this._state = SEGMENT_BUFFER_DATA;
break;
// read segment and buffer it, process as exif
case SEGMENT_BUFFER_DATA:
di = Math.min(this._bytesLeft, data.length - i);
let buf_slice = slice(data, i, i + di);
copy(buf_slice, this._app1buffer, this._app1pos);
this._app1pos += buf_slice.length;
i += di;
this._bytesLeft -= di;
if (this._bytesLeft <= 0) {
let buf = this._app1buffer;
this._app1buffer = null;
if (this._markerCode === 0xE1 /* APP1 */ &&
// compare with 'Exif\0\0'
buf[0] === 0x45 && buf[1] === 0x78 && buf[2] === 0x69 && | random_line_split |
||
filter_jpeg.js | = options.maxEntrySize;
this._onIFDEntry = options.onIFDEntry;
// internal data
this._markerCode = 0;
this._bytesLeft = 0;
this._segmentLength = 0;
this._app1buffer = null;
this._app1pos = 0;
this._bytesRead = 0;
//
this._BufferConstructor = null;
this._bufferUseAlloc = false;
this._bufferUseFrom = false;
}
function toHex(number) {
let n = number.toString(16).toUpperCase();
for (let i = 2 - n.length; i > 0; i--) n = '0' + n;
return '0x' + n;
}
// Perform a shallow copy of a buffer or typed array
//
function slice(buf, start, end) {
if (buf.slice && buf.copy && buf.writeDoubleBE) {
//
// Looks like node.js buffer
//
// - we use buf.slice() in node.js buffers because
// buf.subarray() is not a buffer
//
// - we use buf.subarray() in uint8arrays because
// buf.slice() is not a shallow copy
//
return buf.slice(start, end);
}
return buf.subarray(start, end);
}
// Copy one buffer to another
//
function copy(src, dst, dst_offset) {
if (src.length + dst_offset > dst.length) throw new Error('buffer is too small');
if (src.copy) {
src.copy(dst, dst_offset);
} else {
dst.set(src, dst_offset);
}
}
JpegFilter.prototype._error = function (message, code) {
// double error?
if (this._state === FINAL) return;
let err = new Error(message);
err.code = code;
this._state = FINAL;
this.onError(err);
};
// Detect required output type by first input chunk
JpegFilter.prototype._detectBuffer = function (data) {
if (this._BufferConstructor) return;
this._BufferConstructor = data.constructor;
this._bufferUseAlloc = typeof data.constructor.alloc === 'function';
this._bufferUseFrom = typeof data.constructor.from === 'function';
};
// Helper to allocate output with proper class type (Uint8Array|Buffer)
// All this magic is required only to make code work in browser too.
JpegFilter.prototype._buffer = function (arg) {
let cls = this._BufferConstructor;
/* eslint-disable new-cap */
if (typeof arg === 'number') {
return this._bufferUseAlloc ? cls.alloc(arg) : new cls(arg);
}
return this._bufferUseFrom ? cls.from(arg) : new cls(arg);
};
/* eslint-disable max-depth */
JpegFilter.prototype.push = function (data) {
// guess output data type by first input chunk
this._detectBuffer(data);
var buf, di, i = 0;
while (i < data.length) {
let b = data[i];
switch (this._state) {
// start of the file, read signature (FF)
case FILE_START:
if (b !== 0xFF) {
this._error('unknown file format', 'ENOTJPEG', i);
return;
}
this._state = FILE_START_FF;
i++;
break;
// start of the file, read signature (D8)
case FILE_START_FF:
if (b !== 0xD8) {
this._error('unknown file format', 'ENOTJPEG', i);
return;
}
this.onData(this._buffer([ 0xFF, 0xD8 ]));
this._state = SEGMENT_START;
i++;
break;
// start of a segment, expect to read FF
case SEGMENT_START:
if (this._markerCode === 0xDA) {
// previous segment was SOS, so we should read image data instead
this._state = IMAGE;
break;
}
if (b !== 0xFF) {
this._error('unexpected byte at segment start: ' + toHex(b) +
' (offset ' + toHex(this._bytesRead + i) + ')',
'EBADDATA');
return;
}
this._state = SEGMENT_MARKER;
i++;
break;
// read marker ID
case SEGMENT_MARKER:
// standalone markers, according to JPEG 1992,
// http://www.w3.org/Graphics/JPEG/itu-t81.pdf, see Table B.1
if ((0xD0 <= b && b <= 0xD9) || b === 0x01) {
this._markerCode = b;
this._bytesLeft = 0;
this._segmentLength = 0;
if (this._markerCode === 0xD9 /* EOI */) {
this.onData(this._buffer([ 0xFF, 0xD9 ]));
this._state = FINAL;
this.onEnd();
} else {
this._state = SEGMENT_LENGTH;
}
i++;
break;
}
// the rest of the unreserved markers
if (0xC0 <= b && b <= 0xFE) {
this._markerCode = b;
this._bytesLeft = 2;
this._segmentLength = 0;
this._state = SEGMENT_LENGTH;
i++;
break;
}
if (b === 0xFF) |
// unknown markers
this._error('unknown marker: ' + toHex(b) +
' (offset ' + toHex(this._bytesRead + i) + ')',
'EBADDATA');
return; // return after error, not break
// read segment length (2 bytes total)
case SEGMENT_LENGTH:
while (this._bytesLeft > 0 && i < data.length) {
this._segmentLength = this._segmentLength * 0x100 + data[i];
this._bytesLeft--;
i++;
}
if (this._bytesLeft <= 0) {
if (this._comment !== null && typeof this._comment !== 'undefined' && this._markerCode !== 0xE0) {
// insert comment field before any other markers (except APP0)
//
// (we can insert it anywhere, but JFIF segment being first
// looks nicer in hexdump)
//
let enc;
try {
// poor man's utf8 encoding
enc = unescape(encodeURIComponent(this._comment));
} catch (err) {
enc = this._comment;
}
buf = this._buffer(5 + enc.length);
buf[0] = 0xFF;
buf[1] = 0xFE;
buf[2] = ((enc.length + 3) >>> 8) & 0xFF;
buf[3] = (enc.length + 3) & 0xFF;
/* eslint-disable no-loop-func */
enc.split('').forEach((c, pos) => {
buf[pos + 4] = c.charCodeAt(0) & 0xFF;
});
buf[buf.length - 1] = 0;
this._comment = null;
this.onData(buf);
}
if (this._markerCode === 0xE0) {
// APP0, 14-byte JFIF header
this._state = SEGMENT_PIPE;
} else if (this._markerCode === 0xE1) {
// APP1, Exif candidate
this._state = this._filter && this._removeExif ?
SEGMENT_IGNORE : // ignore if we remove both
SEGMENT_BUFFER;
} else if (this._markerCode === 0xE2) {
// APP2, ICC_profile
this._state = this._removeICC ?
SEGMENT_IGNORE :
SEGMENT_PIPE;
} else if (this._markerCode > 0xE2 && this._markerCode < 0xF0) {
// Photoshop metadata, etc.
this._state = this._filter ?
SEGMENT_IGNORE :
SEGMENT_PIPE;
} else if (this._markerCode === 0xFE) {
// Comments
this._state = this._removeComments ?
SEGMENT_IGNORE :
SEGMENT_PIPE;
} else {
// other valid headers
this._state = this._removeImage ?
SEGMENT_IGNORE :
SEGMENT_PIPE;
}
this._bytesLeft = Math.max(this._segmentLength - 2, 0);
}
break;
// read segment and ignore it
case SEGMENT_IGNORE:
di = Math.min(this._bytesLeft, data.length - i);
i += di;
this._bytesLeft -= di;
if (this._bytesLeft <= 0) this._state = SEGMENT_START;
break;
// read segment and pass it into output
case SEGMENT_PIPE:
if (this._bytesLeft <= 0) {
this._state = SEGMENT_START;
} else {
this._state = SEGMENT_PIPE_DATA;
}
buf = this._buffer(4);
buf[0] = 0xFF;
buf[1] = this._markerCode;
buf | {
// padding byte, skip it
i++;
break;
} | conditional_block |
ptsrc_gibbs.py | 5)
self.ps = ps
self.b, self.x = None, None
# Prepare the preconditioner. It approximates the noise as the
# same in every pixel, and ignores the cmb-template coupling.
# See M(self,u) for details.
iN_white = np.array(np.sum(np.mean(np.mean(self.iN,-1),-1),0))
# iN_white is now in pixel space, but the preconditioner needs it
# in harmonic space, which introduces a
#norm = np.prod((maps.box[1]-maps.box[0])/maps.shape[-2:])
#norm = 1./np.prod(maps.shape[-2:])
#iN_white /= norm
self.S_prec = en.multi_pow(self.iS + iN_white[:,:,None,None], -1)
# The template
self.set_template(T)
def set_template(self, T):
if T is None: T = np.zeros((0,)+self.d.shape)
self.T = T
self.TT = np.einsum("aijyx,bijyx->ab",self.T,self.T)
self.dof = DOF(Arg(default=self.d[0]), Arg(shape=T.shape[:1]))
def P(self, u):
s, a = self.dof.unzip(u)
return s[None,:,:,:] + np.sum(self.T*a[:,None,None,None,None],0)
def PT(self, d):
return self.dof.zip(np.sum(d,0), np.einsum("qijyx,ijyx->q",self.T, d))
def A(self, u):
s, a = self.dof.unzip(u)
# U"u = [S"s, 0a]
Uu = self.dof.zip(en.harm2map(en.map_mul(self.iS, en.map2harm(s))),a*0)
# P'N"P u
PNPu = self.PT(en.map_mul(self.iN, self.P(u)))
return Uu + PNPu
def M(self, u):
# Multiplying things out, the full expression for A is:
# [ S" + sum(N") sum(N"T) ]
# [ sum(T'N") sum(T'T) ]
# A reasonable approximation for this is
# [ S" + sum(sigma^{-2}) 0 ]
# [ 0 sum(T'T) ]
# which can be directly inverted.
s, a = self.dof.unzip(u)
# Solve for the cmb signal component
res_s = en.harm2map(en.map_mul(self.S_prec,en.map2harm(s)))
res_a = np.linalg.solve(self.TT, a)
return self.dof.zip(res_s, res_a)
def calc_b(self):
PNd = self.PT(en.map_mul(self.iN, self.d))
Uw1_s = en.harm2map(en.map_mul(self.hS, en.rand_gauss_harm(self.d.shape[-3:],self.d.wcs)))
Uw1_a = np.zeros(self.T.shape[0])
Uw1 = self.dof.zip(Uw1_s, Uw1_a)
PNw2 = self.PT(en.map_mul(self.hN, en.rand_gauss(self.d.shape, self.d.wcs)))
return PNd + Uw1 + PNw2
def solve(self, b, x0, verbose=False):
cg = CG(self.A, b, x0=x0*0, M=self.M)
while cg.err > 1e-6:
cg.step()
if verbose:
print "%5d %15.7e %15.7e" % (cg.i, cg.err, cg.err_true) #, self.dof.unzip(cg.x)[1]
#if cg.i % 10 == 0:
# s, a = self.dof.unzip(cg.x)
# matshow(s[0]); colorbar(); show()
return cg.x
def sample(self, verbose=False):
self.b = self.calc_b()
if self.x is None: self.x = self.dof.zip(self.d[0], np.zeros(self.T.shape[0]))
self.x = self.solve(self.b, self.x, verbose)
return self.dof.unzip(self.x)
class PtsrcModel:
"""This class converts from point source shape parameters to amplitude
basis functions."""
def __init__(self, template):
self.pos = template.posmap()
self.nfreq, self.ncomp = template.shape[:2]
self.nparam = self.nfreq*self.ncomp
def get_templates(self, pos, irads):
x = utils.rewind(self.pos - pos[:,None,None],0,2*np.pi)
W = np.array([[irads[0],irads[2]],[irads[2],irads[1]]])
xWx = np.sum(np.einsum("ab,byx->ayx", W, x)*x,0)
profile = np.exp(-0.5*xWx)
bases = np.eye(self.nfreq*self.ncomp).reshape(self.nfreq*self.ncomp,self.nfreq,self.ncomp)
return profile[None,None,None]*bases[:,:,:,None,None]
def get_model(self, amps, pos, irads):
return np.sum((self.get_templates(pos, irads).T*amps.T).T,0)
class ShapeSampler:
def __init__(self, maps, inoise, model, amps, pos, pos0, irads, nsamp=200, stepsize=0.02, maxdist=1.5*np.pi/180/60):
self.maps = maps
self.inoise = inoise
self.model= model
self.nsamp= nsamp
self.stepsize = stepsize
self.amps = amps
self.pos, self.irads = pos, irads
self.pos0 = pos0
self.maxdist=maxdist
self.lik = self.getlik(self.amps, self.pos, self.irads)
def getlik(self, amps, pos, irads):
if irads[0] < 0 or irads[1] < 0: return np.inf
if irads[0]*irads[1]-irads[2]**2 <= 0: return np.inf
sigma, phi = expand_beam(irads)
# The beam has a tendency to run off in unrealistic directions,
# so we need a relatively strong prior on it.
if np.min(sigma) < beam_range[0] or np.max(sigma) > beam_range[1] or np.max(sigma)/np.min(sigma) > beam_max_asym: return np.inf
template = self.model.get_model(amps, pos, irads)
residual = self.maps-template
tmp = np.einsum("fabyx,abyx->fayx",self.inoise, residual)
deviation = np.sum((pos-self.pos0)**2)**0.5/self.maxdist
penalty = 1+max(deviation-1,0)**2
return 0.5*np.sum(tmp*residual)*penalty
def newpos(self, pos):
# Draw pos with gaussian prior centered on previous position
# With a width given by the fiducial beam size.
step = self.stepsize
if np.random.uniform() < 0.1: step *= 100 # Sometimes try larger steps to break out of ruts
return pos + np.random.standard_normal(2) * beam_fiducial * step
def newshape(self, irads):
return irads + np.random.standard_normal(3) * 1.0/beam_fiducial**2 * self.stepsize * 0.5
def newamp(self, amps):
return amps + np.random.standard_normal(len(amps)) * 1000 * self.stepsize
def subsample(self, verbose=False):
pos = self.newpos(self.pos)
lik = self.getlik(self.amps, pos, self.irads)
if np.random.uniform() < np.exp(self.lik-lik):
self.pos, self.lik = pos, lik
irads = self.newshape(self.irads)
lik = self.getlik(self.amps, self.pos, irads)
if np.random.uniform() < np.exp(self.lik-lik):
self.irads, self.lik = irads, lik
amps = self.newamp(self.amps)
lik = self.getlik(amps, self.pos, self.irads)
if np.random.uniform() < np.exp(self.lik-lik):
self.amps, self.lik = amps, lik
if verbose:
sigma, phi = expand_beam(self.irads)
print (" %9.2f"*len(self.amps)+" %10.5f %10.5f %8.3f %8.3f %8.3f") % (tuple(self.amps)+tuple(self.pos*r2c)+tuple(sigma*r2b)+(phi*r2c,))
return self.amps, self.pos, self.irads
def | sample | identifier_name |
|
ptsrc_gibbs.py | (self, u):
s, a = self.dof.unzip(u)
return s[None,:,:,:] + np.sum(self.T*a[:,None,None,None,None],0)
def PT(self, d):
return self.dof.zip(np.sum(d,0), np.einsum("qijyx,ijyx->q",self.T, d))
def A(self, u):
s, a = self.dof.unzip(u)
# U"u = [S"s, 0a]
Uu = self.dof.zip(en.harm2map(en.map_mul(self.iS, en.map2harm(s))),a*0)
# P'N"P u
PNPu = self.PT(en.map_mul(self.iN, self.P(u)))
return Uu + PNPu
def M(self, u):
# Multiplying things out, the full expression for A is:
# [ S" + sum(N") sum(N"T) ]
# [ sum(T'N") sum(T'T) ]
# A reasonable approximation for this is
# [ S" + sum(sigma^{-2}) 0 ]
# [ 0 sum(T'T) ]
# which can be directly inverted.
s, a = self.dof.unzip(u)
# Solve for the cmb signal component
res_s = en.harm2map(en.map_mul(self.S_prec,en.map2harm(s)))
res_a = np.linalg.solve(self.TT, a)
return self.dof.zip(res_s, res_a)
def calc_b(self):
PNd = self.PT(en.map_mul(self.iN, self.d))
Uw1_s = en.harm2map(en.map_mul(self.hS, en.rand_gauss_harm(self.d.shape[-3:],self.d.wcs)))
Uw1_a = np.zeros(self.T.shape[0])
Uw1 = self.dof.zip(Uw1_s, Uw1_a)
PNw2 = self.PT(en.map_mul(self.hN, en.rand_gauss(self.d.shape, self.d.wcs)))
return PNd + Uw1 + PNw2
def solve(self, b, x0, verbose=False):
cg = CG(self.A, b, x0=x0*0, M=self.M)
while cg.err > 1e-6:
cg.step()
if verbose:
print "%5d %15.7e %15.7e" % (cg.i, cg.err, cg.err_true) #, self.dof.unzip(cg.x)[1]
#if cg.i % 10 == 0:
# s, a = self.dof.unzip(cg.x)
# matshow(s[0]); colorbar(); show()
return cg.x
def sample(self, verbose=False):
self.b = self.calc_b()
if self.x is None: self.x = self.dof.zip(self.d[0], np.zeros(self.T.shape[0]))
self.x = self.solve(self.b, self.x, verbose)
return self.dof.unzip(self.x)
class PtsrcModel:
"""This class converts from point source shape parameters to amplitude
basis functions."""
def __init__(self, template):
self.pos = template.posmap()
self.nfreq, self.ncomp = template.shape[:2]
self.nparam = self.nfreq*self.ncomp
def get_templates(self, pos, irads):
x = utils.rewind(self.pos - pos[:,None,None],0,2*np.pi)
W = np.array([[irads[0],irads[2]],[irads[2],irads[1]]])
xWx = np.sum(np.einsum("ab,byx->ayx", W, x)*x,0)
profile = np.exp(-0.5*xWx)
bases = np.eye(self.nfreq*self.ncomp).reshape(self.nfreq*self.ncomp,self.nfreq,self.ncomp)
return profile[None,None,None]*bases[:,:,:,None,None]
def get_model(self, amps, pos, irads):
return np.sum((self.get_templates(pos, irads).T*amps.T).T,0)
class ShapeSampler:
def __init__(self, maps, inoise, model, amps, pos, pos0, irads, nsamp=200, stepsize=0.02, maxdist=1.5*np.pi/180/60):
self.maps = maps
self.inoise = inoise
self.model= model
self.nsamp= nsamp
self.stepsize = stepsize
self.amps = amps
self.pos, self.irads = pos, irads
self.pos0 = pos0
self.maxdist=maxdist
self.lik = self.getlik(self.amps, self.pos, self.irads)
def getlik(self, amps, pos, irads):
if irads[0] < 0 or irads[1] < 0: return np.inf
if irads[0]*irads[1]-irads[2]**2 <= 0: return np.inf
sigma, phi = expand_beam(irads)
# The beam has a tendency to run off in unrealistic directions,
# so we need a relatively strong prior on it.
if np.min(sigma) < beam_range[0] or np.max(sigma) > beam_range[1] or np.max(sigma)/np.min(sigma) > beam_max_asym: return np.inf
template = self.model.get_model(amps, pos, irads)
residual = self.maps-template
tmp = np.einsum("fabyx,abyx->fayx",self.inoise, residual)
deviation = np.sum((pos-self.pos0)**2)**0.5/self.maxdist
penalty = 1+max(deviation-1,0)**2
return 0.5*np.sum(tmp*residual)*penalty
def newpos(self, pos):
# Draw pos with gaussian prior centered on previous position
# With a width given by the fiducial beam size.
step = self.stepsize
if np.random.uniform() < 0.1: step *= 100 # Sometimes try larger steps to break out of ruts
return pos + np.random.standard_normal(2) * beam_fiducial * step
def newshape(self, irads):
return irads + np.random.standard_normal(3) * 1.0/beam_fiducial**2 * self.stepsize * 0.5
def newamp(self, amps):
return amps + np.random.standard_normal(len(amps)) * 1000 * self.stepsize
def subsample(self, verbose=False):
pos = self.newpos(self.pos)
lik = self.getlik(self.amps, pos, self.irads)
if np.random.uniform() < np.exp(self.lik-lik):
self.pos, self.lik = pos, lik
irads = self.newshape(self.irads)
lik = self.getlik(self.amps, self.pos, irads)
if np.random.uniform() < np.exp(self.lik-lik):
self.irads, self.lik = irads, lik
amps = self.newamp(self.amps)
lik = self.getlik(amps, self.pos, self.irads)
if np.random.uniform() < np.exp(self.lik-lik):
self.amps, self.lik = amps, lik
if verbose:
sigma, phi = expand_beam(self.irads)
print (" %9.2f"*len(self.amps)+" %10.5f %10.5f %8.3f %8.3f %8.3f") % (tuple(self.amps)+tuple(self.pos*r2c)+tuple(sigma*r2b)+(phi*r2c,))
return self.amps, self.pos, self.irads
def sample(self, verbose=False):
"""Draw a new, uncorrelated sample."""
for i in range(self.nsamp): self.subsample(verbose)
return self.amps, self.pos, self.irads
class ShapeSamplerMulti:
| def __init__(self, maps, inoise, model, amps, pos, pos0, irads, nsamp=1500, stepsize=0.02, maxdist=1.5*np.pi/180/60):
self.samplers = [ShapeSampler(maps, inoise, model, amp1, pos1, pos01, irads1, nsamp=1, stepsize=stepsize, maxdist=maxdist) for amp1, pos1, pos01, irads1 in zip(amps, pos, pos0, irads)]
self.nsamp = nsamp
def sample(self, verbose=False):
for i in range(self.nsamp):
for sampler in self.samplers:
sampler.sample(verbose)
amps = np.array([s.amps for s in self.samplers])
pos = np.array([s.pos for s in self.samplers])
irads= np.array([s.irads for s in self.samplers])
return amps, pos, irads | identifier_body |
|
ptsrc_gibbs.py |
return nmat
def parse_floats(strs): return np.array([float(w) for w in strs.split(",")])
def apodize(m, rad, apod_fun):
scale = m.extent()/m.shape[-2:]
y = np.arange(m.shape[-2])*scale[0]
x = np.arange(m.shape[-1])*scale[1]
yfun = apod_fun(y, rad)*apod_fun(y[-1]-y, rad)
xfun = apod_fun(x, rad)*apod_fun(x[-1]-x, rad)
a = yfun[:,None]*xfun[None,:]
return m*a
def apod_step(x, r): return x>r
def apod_butter(x, r): return (1+(x/r)**-4)**-1
def apod_cos(x,r): return (1-np.cos(np.minimum(x/r, 1)*np.pi))/2
# Read our inputs
freqs = parse_floats(args.freqs)
maps = read_maps(args.maps, len(freqs))
ncomp = maps.shape[-3]
nfreq = maps.shape[-4]
noise = read_noise(args.noise, maps.shape, maps.wcs, len(freqs))
ps = powspec.read_spectrum(args.powspec, expand="diag")[:ncomp,:ncomp]
poss = np.loadtxt(args.posfile)[:,:2]/r2c
R = args.radius/r2c/60
beam_fiducial = 1.5/r2b
beam_range = [0.8/r2b,3.0/r2b]
beam_max_asym = 2
apod_rad = R/10
# We will cut out small mini-maps around each source candidate and
# sample the CMB and source parameters jointly. But some candidates
# are so near each other that they aren't independent. These must
# be grouped into groups.
def build_groups(poss):
def dist(a,b): return np.sum((poss[a]-poss[b])**2)**0.5*180*60/np.pi
rest = set(range(len(poss)))
groups = []
while len(rest) > 0:
group = []
tocheck = [rest.pop()]
# Find distance to all other points
while len(tocheck) > 0:
current = tocheck.pop()
rnew = set()
while rest:
other = rest.pop()
if dist(current,other) < args.mindist_group:
tocheck.append(other)
else:
rnew.add(other)
rest = rnew
group.append(current)
groups.append(group)
return groups
groups = build_groups(poss)
print "Found %d groups" % len(groups)
# We will sample (cmb,A,pos,ibeam) jointly in gibbs fashion:
# cmb,A <- P(cmb,A|data,A,pos,ibeam) # direct, but requires cr
# pos,ibeam <- P(pos,ibeam|data,cmb,A) # MCMC
# To take into account the nonperiodicity of each submap, we must introduce
# a region of extra noise around the edge.
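# (Illustrative sketch only, not part of the original pipeline; the names
# cmb_sampler, shape_sampler and model are placeholders.) For a single source,
# one Gibbs iteration following the scheme above could look like:
#
#   s, amps = cmb_sampler.sample()            # conditional draw of cmb and amplitudes
#   shape_sampler.amps = amps                 # hand the new amplitudes to the shape step
#   amps, pos, irads = shape_sampler.sample() # MCMC update of position and beam shape
#   cmb_sampler.set_template(model.get_templates(pos, irads))
#
# where cmb_sampler, shape_sampler and model would be instances of the
# CMBSampler, ShapeSampler and PtsrcModel classes defined below.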
class CMBSampler:
"""Draws samples from P(s,a|d,Cl,N,T), where T[ntemp,nfreq,ncomp,ny,nx] is a set of templates.
a[ntemp] is the set of template amplitudes."""
def __init__(self, maps, inoise, ps, T=None):
self.d = maps
self.iN = inoise
self.hN = en.multi_pow(inoise, 0.5, axes=[1,2])
self.iS = en.spec2flat(maps.shape[-3:], maps.wcs, ps, -1.0)
self.hS = en.spec2flat(maps.shape[-3:], maps.wcs, ps, -0.5)
self.ps = ps
self.b, self.x = None, None
# Prepare the preconditioner. It approximates the noise as the
# same in every pixel, and ignores the cmb-template coupling.
# See M(self,u) for details.
iN_white = np.array(np.sum(np.mean(np.mean(self.iN,-1),-1),0))
# iN_white is now in pixel space, but the preconditioner needs it
# in harmonic space, which introduces a
#norm = np.prod((maps.box[1]-maps.box[0])/maps.shape[-2:])
#norm = 1./np.prod(maps.shape[-2:])
#iN_white /= norm
self.S_prec = en.multi_pow(self.iS + iN_white[:,:,None,None], -1)
# The template
self.set_template(T)
def set_template(self, T):
if T is None: T = np.zeros((0,)+self.d.shape)
self.T = T
self.TT = np.einsum("aijyx,bijyx->ab",self.T,self.T)
self.dof = DOF(Arg(default=self.d[0]), Arg(shape=T.shape[:1]))
def P(self, u):
s, a = self.dof.unzip(u)
return s[None,:,:,:] + np.sum(self.T*a[:,None,None,None,None],0)
def PT(self, d):
return self.dof.zip(np.sum(d,0), np.einsum("qijyx,ijyx->q",self.T, d))
def A(self, u):
s, a = self.dof.unzip(u)
# U"u = [S"s, 0a]
Uu = self.dof.zip(en.harm2map(en.map_mul(self.iS, en.map2harm(s))),a*0)
# P'N"P u
PNPu = self.PT(en.map_mul(self.iN, self.P(u)))
return Uu + PNPu
def M(self, u):
# Multiplying things out, the full expression for A is:
# [ S" + sum(N") sum(N"T) ]
# [ sum(T'N") sum(T'T) ]
# A reasonable approximation for this is
# [ S" + sum(sigma^{-2}) 0 ]
# [ 0 sum(T'T) ]
# which can be directly inverted.
s, a = self.dof.unzip(u)
# Solve for the cmb signal component
res_s = en.harm2map(en.map_mul(self.S_prec,en.map2harm(s)))
res_a = np.linalg.solve(self.TT, a)
return self.dof.zip(res_s, res_a)
def calc_b(self):
PNd = self.PT(en.map_mul(self.iN, self.d))
Uw1_s = en.harm2map(en.map_mul(self.hS, en.rand_gauss_harm(self.d.shape[-3:],self.d.wcs)))
Uw1_a = np.zeros(self.T.shape[0])
Uw1 = self.dof.zip(Uw1_s, Uw1_a)
PNw2 = self.PT(en.map_mul(self.hN, en.rand_gauss(self.d.shape, self.d.wcs)))
return PNd + Uw1 + PNw2
def solve(self, b, x0, verbose=False):
cg = CG(self.A, b, x0=x0*0, M=self.M)
while cg.err > 1e-6:
cg.step()
if verbose:
print "%5d %15.7e %15.7e" % (cg.i, cg.err, cg.err_true) #, self.dof.unzip(cg.x)[1]
#if cg.i % 10 == 0:
# s, a = self.dof.unzip(cg.x)
# matshow(s[0]); colorbar(); show()
return cg.x
def sample(self, verbose=False):
self.b = self.calc_b()
if self.x is None: self.x = self.dof.zip(self.d[0], np.zeros(self.T.shape[0]))
self.x = self.solve(self.b, self.x, verbose)
return self.dof.unzip(self.x)
class PtsrcModel:
"""This class converts from point source shape parameters to amplitude
basis functions."""
def __init__(self, template):
self.pos = template.posmap()
self.nfreq, self.ncomp = template.shape[:2]
self.nparam = self.nfreq*self.ncomp
def get_templates(self, pos, irads):
x = utils.rewind(self.pos - pos[:,None,None],0,2*np.pi)
W = np.array([[irads[0],irads[2]],[irads[2],irads[1]]])
xWx = np.sum(np.einsum("ab,byx->ayx", W, x)*x,0)
profile = np.exp(-0.5*xWx)
bases = np.eye(self.nfreq*self.ncomp).reshape(self.nfreq*self.ncomp,self.nfreq,self.ncomp)
return profile[None,None,None]*bases[:,:,:,None,None]
def get_model(self | raise ValueError("Noise and maps have inconsistent shape!") | conditional_block |
|
ptsrc_gibbs.py | ,cmb,A) # MCMC
# To take into account the nonperiodicity of each submap, we must introduce
# a region of extra noise around the edge.
class CMBSampler:
"""Draws samples from P(s,a|d,Cl,N,T), where T[ntemp,nfreq,ncomp,ny,nx] is a set of templates.
a[ntemp] is the set of template amplitudes."""
def __init__(self, maps, inoise, ps, T=None):
self.d = maps
self.iN = inoise
self.hN = en.multi_pow(inoise, 0.5, axes=[1,2])
self.iS = en.spec2flat(maps.shape[-3:], maps.wcs, ps, -1.0)
self.hS = en.spec2flat(maps.shape[-3:], maps.wcs, ps, -0.5)
self.ps = ps
self.b, self.x = None, None
# Prepare the preconditioner. It approximates the noise as the
# same in every pixel, and ignores the cmb-template coupling.
# See M(self,u) for details.
iN_white = np.array(np.sum(np.mean(np.mean(self.iN,-1),-1),0))
# iN_white is now in pixel space, but the preconditioner needs it
# in harmonic space, which introduces a
#norm = np.prod((maps.box[1]-maps.box[0])/maps.shape[-2:])
#norm = 1./np.prod(maps.shape[-2:])
#iN_white /= norm
self.S_prec = en.multi_pow(self.iS + iN_white[:,:,None,None], -1)
# The template
self.set_template(T)
def set_template(self, T):
if T is None: T = np.zeros((0,)+self.d.shape)
self.T = T
self.TT = np.einsum("aijyx,bijyx->ab",self.T,self.T)
self.dof = DOF(Arg(default=self.d[0]), Arg(shape=T.shape[:1]))
def P(self, u):
s, a = self.dof.unzip(u)
return s[None,:,:,:] + np.sum(self.T*a[:,None,None,None,None],0)
def PT(self, d):
return self.dof.zip(np.sum(d,0), np.einsum("qijyx,ijyx->q",self.T, d))
def A(self, u):
s, a = self.dof.unzip(u)
# U"u = [S"s, 0a]
Uu = self.dof.zip(en.harm2map(en.map_mul(self.iS, en.map2harm(s))),a*0)
# P'N"P u
PNPu = self.PT(en.map_mul(self.iN, self.P(u)))
return Uu + PNPu
def M(self, u):
# Multiplying things out, the full expression for A is:
# [ S" + sum(N") sum(N"T) ]
# [ sum(T'N") sum(T'T) ]
# A reasonable approximation for this is
# [ S" + sum(sigma^{-2}) 0 ]
# [ 0 sum(T'T) ]
# which can be directly inverted.
s, a = self.dof.unzip(u)
# Solve for the cmb signal component
res_s = en.harm2map(en.map_mul(self.S_prec,en.map2harm(s)))
res_a = np.linalg.solve(self.TT, a)
return self.dof.zip(res_s, res_a)
def calc_b(self):
PNd = self.PT(en.map_mul(self.iN, self.d))
Uw1_s = en.harm2map(en.map_mul(self.hS, en.rand_gauss_harm(self.d.shape[-3:],self.d.wcs)))
Uw1_a = np.zeros(self.T.shape[0])
Uw1 = self.dof.zip(Uw1_s, Uw1_a)
PNw2 = self.PT(en.map_mul(self.hN, en.rand_gauss(self.d.shape, self.d.wcs)))
return PNd + Uw1 + PNw2
def solve(self, b, x0, verbose=False):
cg = CG(self.A, b, x0=x0*0, M=self.M)
while cg.err > 1e-6:
cg.step()
if verbose:
print "%5d %15.7e %15.7e" % (cg.i, cg.err, cg.err_true) #, self.dof.unzip(cg.x)[1]
#if cg.i % 10 == 0:
# s, a = self.dof.unzip(cg.x)
# matshow(s[0]); colorbar(); show()
return cg.x
def sample(self, verbose=False):
self.b = self.calc_b()
if self.x is None: self.x = self.dof.zip(self.d[0], np.zeros(self.T.shape[0]))
self.x = self.solve(self.b, self.x, verbose)
return self.dof.unzip(self.x)
class PtsrcModel:
"""This class converts from point source shape parameters to amplitude
basis functions."""
def __init__(self, template):
self.pos = template.posmap()
self.nfreq, self.ncomp = template.shape[:2]
self.nparam = self.nfreq*self.ncomp
def get_templates(self, pos, irads):
x = utils.rewind(self.pos - pos[:,None,None],0,2*np.pi)
W = np.array([[irads[0],irads[2]],[irads[2],irads[1]]])
xWx = np.sum(np.einsum("ab,byx->ayx", W, x)*x,0)
profile = np.exp(-0.5*xWx)
bases = np.eye(self.nfreq*self.ncomp).reshape(self.nfreq*self.ncomp,self.nfreq,self.ncomp)
return profile[None,None,None]*bases[:,:,:,None,None]
def get_model(self, amps, pos, irads):
return np.sum((self.get_templates(pos, irads).T*amps.T).T,0)
class ShapeSampler:
def __init__(self, maps, inoise, model, amps, pos, pos0, irads, nsamp=200, stepsize=0.02, maxdist=1.5*np.pi/180/60):
self.maps = maps
self.inoise = inoise
self.model= model
self.nsamp= nsamp
self.stepsize = stepsize
self.amps = amps
self.pos, self.irads = pos, irads
self.pos0 = pos0
self.maxdist=maxdist
self.lik = self.getlik(self.amps, self.pos, self.irads)
def getlik(self, amps, pos, irads):
if irads[0] < 0 or irads[1] < 0: return np.inf
if irads[0]*irads[1]-irads[2]**2 <= 0: return np.inf
sigma, phi = expand_beam(irads)
# The beam has a tendency to run off in unrealistic directions,
# so we need a relatively strong prior on it.
if np.min(sigma) < beam_range[0] or np.max(sigma) > beam_range[1] or np.max(sigma)/np.min(sigma) > beam_max_asym: return np.inf
template = self.model.get_model(amps, pos, irads)
residual = self.maps-template
tmp = np.einsum("fabyx,abyx->fayx",self.inoise, residual)
deviation = np.sum((pos-self.pos0)**2)**0.5/self.maxdist
penalty = 1+max(deviation-1,0)**2
return 0.5*np.sum(tmp*residual)*penalty
def newpos(self, pos):
# Draw pos with gaussian prior centered on previous position
# With a width given by the fiducial beam size.
step = self.stepsize
if np.random.uniform() < 0.1: step *= 100 # Sometimes try larger steps to break out of ruts
return pos + np.random.standard_normal(2) * beam_fiducial * step
def newshape(self, irads):
return irads + np.random.standard_normal(3) * 1.0/beam_fiducial**2 * self.stepsize * 0.5
def newamp(self, amps):
return amps + np.random.standard_normal(len(amps)) * 1000 * self.stepsize
def subsample(self, verbose=False):
pos = self.newpos(self.pos)
lik = self.getlik(self.amps, pos, self.irads) | if np.random.uniform() < np.exp(self.lik-lik):
self.pos, self.lik = pos, lik
irads = self.newshape(self.irads) | random_line_split |
|
day20.rs | );
assert_eq!(invert(512), 1);
assert_eq!(invert(2), 256);
}
}
impl std::fmt::Display for Tile {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut first = true;
for line in self.rows.iter() {
if first {
first = false;
} else {
f.write_str("\n")?;
}
f.write_str(&line_to_string(*line))?;
}
Ok(())
}
}
impl Tile {
fn parse(input: &str) -> Tile {
let mut lines = input.lines();
let id = Tile::parse_id(lines.next().unwrap());
let rows = lines.map(|line| parse_line(line) as Line).collect();
Tile {
id,
rows,
..Tile::default()
}
}
fn parse_id(line: &str) -> usize {
assert!(line.starts_with("Tile "));
assert!(line.ends_with(':'));
line[5..SIZE - 1].parse().unwrap()
}
fn north_edge(self: &Self) -> Line { |
fn west_edge(self: &Self) -> Line {
self.rotate_cw().rows[0]
}
fn east_edge(self: &Self) -> Line {
self.rotate_cw().rotate_cw().rotate_cw().rows[0]
}
fn south_edge(self: &Self) -> Line {
self.rotate_cw().rotate_cw().rows[0]
}
fn get_edges(self: &Self) -> [Line; 4] {
let rot1 = self.rotate_cw();
let rot2 = rot1.rotate_cw();
let rot3 = rot2.rotate_cw();
[
self.north_edge(),
rot1.north_edge(),
rot2.north_edge(),
rot3.north_edge(),
]
}
fn rotate_cw(self: &Self) -> Tile {
let mut rows: Vec<Line> = Vec::new();
for i in 0..SIZE {
let mut line = 0;
for j in 0..SIZE {
line = (line << 1) | ((self.rows[SIZE - j - 1] >> (SIZE - i - 1)) & 1);
}
rows.push(line);
}
Tile {
id: self.id,
rows,
flipped: self.flipped,
rotated: (self.rotated + 1) % 4,
// links: [self.links[3], self.links[0], self.links[1], self.links[2]],
}
}
fn mirror_vertical(self: &Self) -> Tile {
Tile {
id: self.id,
rows: self.rows.iter().cloned().rev().collect(),
flipped: !self.flipped,
rotated: 0,
// links: [self.links[2], self.links[1], self.links[0], self.links[3]],
}
}
// Builds all 8 variants of rotations + flippity.
fn make_variants(self: &Self) -> [Self; 8] {
let rot1 = self.rotate_cw();
let rot2 = rot1.rotate_cw();
let rot3 = rot2.rotate_cw();
let flip0 = self.mirror_vertical();
let flip1 = flip0.rotate_cw();
let flip2 = flip1.rotate_cw();
let flip3 = flip2.rotate_cw();
[self.clone(), rot1, rot2, rot3, flip0, flip1, flip2, flip3]
}
}
// All tiles and various helper structs.
#[derive(Debug)]
struct TileBag {
// Mapping from tile id to tile. These tiles will get rotated once we start linking them
// together.
tiles: HashMap<usize, Tile>,
// Mapping from edges to a list of Tiles, rotated such that the top edge is used as the key.
// Each Tile here repeats 8 times with all of its variants.
edges: HashMap<Line, Vec<Tile>>,
// Once we perform assembly, this is where we store the tiles, first rows, then columns.
assembled: Vec<Vec<Tile>>,
}
struct MergedTiles {
rows: Vec<u128>,
}
impl TileBag {
fn parse(input: &str) -> TileBag {
let tiles = input
.split("\n\n")
.map(|tile_lines| {
let t = Tile::parse(tile_lines);
(t.id, t)
})
.collect();
let mut out = TileBag {
tiles,
edges: HashMap::new(),
assembled: Vec::new(),
};
out.build_edges();
out
}
fn build_edges(self: &mut Self) {
for t in self.tiles.values() {
for tt in t.make_variants().iter() {
self.edges
.entry(tt.north_edge())
.or_insert_with(|| Vec::new())
.push(tt.clone());
}
}
}
// Counts how many entries we have in edge map ignoring given id.
fn count_edges(self: &Self, edge: &Line, id_to_ignore: usize) -> usize {
let mut cnt = 0;
for other_edge in self.edges.get(edge).unwrap() {
// Don't count ourselves.
if other_edge.id != id_to_ignore {
cnt += 1;
}
}
cnt
}
// Calculates how many other tile edges this given tile can link to.
fn linked_tiles(self: &Self, tile: &Tile) -> usize {
let mut cnt = 0;
for edge in tile.get_edges().iter() {
cnt += self.count_edges(edge, tile.id);
}
cnt
}
// Finds corner tiles - tiles with only two other tiles linked.
fn find_corners(self: &Self) -> Vec<Tile> {
let corners = self
.tiles
.values()
.filter_map(|t| {
if self.linked_tiles(t) == 2 {
Some(t.clone())
} else {
None
}
})
.collect::<Vec<Tile>>();
assert_eq!(corners.len(), 4);
corners
}
fn orient_starting_tile(self: &Self, tile: &Tile) -> Tile {
for t in tile.make_variants().iter() {
if self.count_edges(&t.north_edge(), tile.id) == 0
&& self.count_edges(&t.west_edge(), tile.id) == 0
{
return t.clone();
}
}
panic!();
}
fn get_tile_for_edge(self: &Self, edge: Line, id_to_ignore: usize) -> Option<Tile> {
let edge_tiles = self.edges.get(&edge).unwrap();
if edge_tiles.len() != 2 {
return None;
}
for tile in edge_tiles.iter() {
if tile.id != id_to_ignore {
return Some(tile.clone());
}
}
panic!("shouldn't get here");
}
// Finds a tile that matches given east edge, ignoring given tile (so to not match ourselves).
fn find_east_neighbor(self: &Self, tile: &Tile) -> Option<Tile> {
Some(
self.get_tile_for_edge(invert(tile.east_edge()), tile.id)?
.rotate_cw()
.rotate_cw()
.rotate_cw(),
)
}
// Finds a tile that matches given south edge, ignoring given tile (so to not match ourselves).
fn find_south_neighbor(self: &Self, tile: &Tile) -> Option<Tile> {
self.get_tile_for_edge(invert(tile.south_edge()), tile.id)
}
// Fills in .assembled with all tiles, rotating/flipping them as needed.
fn assemble(self: &mut Self) {
// Pick one of the corner tiles to start with. Doesn't matter which, so we'll pick the last
// one. Rotate the tile so that it is in top-left corner of the assembled picture (only
// east and south links are used).
let mut tile = self.orient_starting_tile(&self.find_corners().pop().unwrap());
loop {
self.assembled.push(Vec::new());
loop {
// println!("{}\n", tile);
self.assembled.last_mut().unwrap().push(tile.clone());
match self.find_east_neighbor(&tile) {
Some(t) => {
tile = t;
}
None => {
break;
}
}
}
// Go to next row. Find the south neighbor of the first tile from previous row.
match self.find_south_neighbor(&self.assembled.last().unwrap()[0]) {
Some(t) => {
tile = t;
}
None => {
break;
}
}
}
}
// Takes self.assembled and turns it into a giant quilt.
fn merge(self: &mut Self) -> MergedTiles {
const SIZE_INNER: usize = SIZE - 2;
let quilt_side_tiles = self.assembled.len();
let mut out: Vec<u128> = Vec::new();
for _ in 0..(self.assembled.len() * SIZE_INNER) {
out.push(0);
}
for (tile_row_idx, tile_row) in self.assembled.iter | self.rows[0]
} | random_line_split |
day20.rs | 0, flip1, flip2, flip3]
}
}
// All tiles and various helper structs.
#[derive(Debug)]
struct TileBag {
// Mapping from tile id to tile. These tiles will get rotated once we start linking them
// together.
tiles: HashMap<usize, Tile>,
// Mapping from edges to a list of Tiles, rotated such that the top edge is used as the key.
// Each Tile here repeats 8 times with all of its variants.
edges: HashMap<Line, Vec<Tile>>,
// Once we perform assembly, this is where we store the tiles, first rows, then columns.
assembled: Vec<Vec<Tile>>,
}
struct MergedTiles {
rows: Vec<u128>,
}
impl TileBag {
fn parse(input: &str) -> TileBag {
let tiles = input
.split("\n\n")
.map(|tile_lines| {
let t = Tile::parse(tile_lines);
(t.id, t)
})
.collect();
let mut out = TileBag {
tiles,
edges: HashMap::new(),
assembled: Vec::new(),
};
out.build_edges();
out
}
fn build_edges(self: &mut Self) {
for t in self.tiles.values() {
for tt in t.make_variants().iter() {
self.edges
.entry(tt.north_edge())
.or_insert_with(|| Vec::new())
.push(tt.clone());
}
}
}
// Counts how many entries we have in edge map ignoring given id.
fn count_edges(self: &Self, edge: &Line, id_to_ignore: usize) -> usize {
let mut cnt = 0;
for other_edge in self.edges.get(edge).unwrap() {
// Don't count ourselves.
if other_edge.id != id_to_ignore {
cnt += 1;
}
}
cnt
}
// Calculates how many other tile edges this given tile can link to.
fn linked_tiles(self: &Self, tile: &Tile) -> usize {
let mut cnt = 0;
for edge in tile.get_edges().iter() {
cnt += self.count_edges(edge, tile.id);
}
cnt
}
// Finds corner tiles - tiles with only two other tiles linked.
fn find_corners(self: &Self) -> Vec<Tile> {
let corners = self
.tiles
.values()
.filter_map(|t| {
if self.linked_tiles(t) == 2 {
Some(t.clone())
} else {
None
}
})
.collect::<Vec<Tile>>();
assert_eq!(corners.len(), 4);
corners
}
fn orient_starting_tile(self: &Self, tile: &Tile) -> Tile {
for t in tile.make_variants().iter() {
if self.count_edges(&t.north_edge(), tile.id) == 0
&& self.count_edges(&t.west_edge(), tile.id) == 0
{
return t.clone();
}
}
panic!();
}
fn get_tile_for_edge(self: &Self, edge: Line, id_to_ignore: usize) -> Option<Tile> {
let edge_tiles = self.edges.get(&edge).unwrap();
if edge_tiles.len() != 2 {
return None;
}
for tile in edge_tiles.iter() {
if tile.id != id_to_ignore {
return Some(tile.clone());
}
}
panic!("shouldn't get here");
}
// Finds a tile that matches given east edge, ignoring given tile (so to not match ourselves).
fn find_east_neighbor(self: &Self, tile: &Tile) -> Option<Tile> {
Some(
self.get_tile_for_edge(invert(tile.east_edge()), tile.id)?
.rotate_cw()
.rotate_cw()
.rotate_cw(),
)
}
// Finds a tile that matches given south edge, ignoring given tile (so to not match ourselves).
fn find_south_neighbor(self: &Self, tile: &Tile) -> Option<Tile> {
self.get_tile_for_edge(invert(tile.south_edge()), tile.id)
}
// Fills in .assembled with all tiles, rotating/flipping them as needed.
fn assemble(self: &mut Self) {
// Pick one of the corner tiles to start with. Doesn't matter which, so we'll pick the last
// one. Rotate the tile so that it is in top-left corner of the assembled picture (only
// east and south links are used).
let mut tile = self.orient_starting_tile(&self.find_corners().pop().unwrap());
loop {
self.assembled.push(Vec::new());
loop {
// println!("{}\n", tile);
self.assembled.last_mut().unwrap().push(tile.clone());
match self.find_east_neighbor(&tile) {
Some(t) => {
tile = t;
}
None => {
break;
}
}
}
// Go to next row. Find the south neighbor of the first tile from previous row.
match self.find_south_neighbor(&self.assembled.last().unwrap()[0]) {
Some(t) => {
tile = t;
}
None => {
break;
}
}
}
}
// Takes self.assembled and turns it into a giant quilt.
fn merge(self: &mut Self) -> MergedTiles {
const SIZE_INNER: usize = SIZE - 2;
let quilt_side_tiles = self.assembled.len();
let mut out: Vec<u128> = Vec::new();
for _ in 0..(self.assembled.len() * SIZE_INNER) {
out.push(0);
}
for (tile_row_idx, tile_row) in self.assembled.iter().enumerate() {
for (tile_col_idx, tile) in tile_row.iter().enumerate() {
for (tile_row2_idx, row) in tile.rows[1..tile.rows.len() - 1].iter().enumerate() {
let out_row = tile_row_idx * SIZE_INNER + tile_row2_idx;
// dbg!(&out_row);
let out_shift = (quilt_side_tiles - tile_col_idx - 1) * SIZE_INNER;
// dbg!(&out_shift);
out[out_row] |= (((*row as u128) >> 1) & 0xff) << out_shift;
}
}
}
MergedTiles { rows: out }
}
}
type MonsterPattern = [u128; 3];
const MONSTER_WIDTH: usize = 20;
fn make_sea_monster_pattern() -> MonsterPattern {
[
parse_line("..................#."),
parse_line("#....##....##....###"),
parse_line(".#..#..#..#..#..#..."),
]
}
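// Hedged note: count_sea_monsters() below returns (number of monsters found,
// number of '#' cells left over once the monsters are blanked out); the
// leftover count is the puzzle's part-2 "water roughness" answer.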
impl MergedTiles {
// Counts number of sea monsters and tiles without sea monsters on them.
fn count_sea_monsters(self: &mut Self, monster: &MonsterPattern) -> (usize, usize) {
let mut cnt = 0;
for r in 0..(self.rows.len() - monster.len()) {
for c in 0..(self.rows.len() - MONSTER_WIDTH) {
if self.is_sea_monster_at(monster, r, c) {
cnt += 1;
self.remove_sea_monster_at(monster, r, c);
}
}
}
let mut other = 0;
if cnt > 0 {
for r in self.rows.iter() {
let mut i: u128 = *r;
while i > 0 {
if (i & 1) == 1 {
other += 1;
}
i >>= 1;
}
}
}
(cnt, other)
}
fn remove_sea_monster_at(self: &mut Self, monster: &MonsterPattern, row: usize, col: usize) {
for r in 0..monster.len() {
for c in 0..MONSTER_WIDTH {
if (monster[r] >> (MONSTER_WIDTH - c - 1)) & 1 == 1 {
self.rows[row + r] &= !(1 << (c + col));
}
}
}
}
fn is_sea_monster_at(self: &Self, monster: &MonsterPattern, row: usize, col: usize) -> bool {
for r in 0..monster.len() {
for c in 0..MONSTER_WIDTH {
if ((monster[r] >> (MONSTER_WIDTH - c - 1)) & 1 == 1)
&& ((self.rows[row + r] >> (c + col)) & 1 == 0)
{
return false;
}
}
}
true
}
fn rotate_cw(self: &Self) -> MergedTiles {
let mut rows: Vec<u128> = Vec::new();
for i in 0..self.rows.len() {
let mut line = 0;
for j in 0..self.rows.len() {
line = (line << 1)
| ((self.rows[self.rows.len() - j - 1] >> (self.rows.len() - i - 1)) & 1);
}
rows.push(line);
}
MergedTiles { rows }
}
fn | mirror_vertical | identifier_name |
|
day20.rs |
#[test]
fn test_invert() {
assert_eq!(invert(1), 512);
assert_eq!(invert(512), 1);
assert_eq!(invert(2), 256);
}
}
impl std::fmt::Display for Tile {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut first = true;
for line in self.rows.iter() {
if first {
first = false;
} else {
f.write_str("\n")?;
}
f.write_str(&line_to_string(*line))?;
}
Ok(())
}
}
impl Tile {
fn parse(input: &str) -> Tile {
let mut lines = input.lines();
let id = Tile::parse_id(lines.next().unwrap());
let rows = lines.map(|line| parse_line(line) as Line).collect();
Tile {
id,
rows,
..Tile::default()
}
}
fn parse_id(line: &str) -> usize {
assert!(line.starts_with("Tile "));
assert!(line.ends_with(':'));
line[5..SIZE - 1].parse().unwrap()
}
fn north_edge(self: &Self) -> Line {
self.rows[0]
}
fn west_edge(self: &Self) -> Line {
self.rotate_cw().rows[0]
}
fn east_edge(self: &Self) -> Line {
self.rotate_cw().rotate_cw().rotate_cw().rows[0]
}
fn south_edge(self: &Self) -> Line {
self.rotate_cw().rotate_cw().rows[0]
}
fn get_edges(self: &Self) -> [Line; 4] {
let rot1 = self.rotate_cw();
let rot2 = rot1.rotate_cw();
let rot3 = rot2.rotate_cw();
[
self.north_edge(),
rot1.north_edge(),
rot2.north_edge(),
rot3.north_edge(),
]
}
fn rotate_cw(self: &Self) -> Tile {
let mut rows: Vec<Line> = Vec::new();
for i in 0..SIZE {
let mut line = 0;
for j in 0..SIZE {
line = (line << 1) | ((self.rows[SIZE - j - 1] >> (SIZE - i - 1)) & 1);
}
rows.push(line);
}
Tile {
id: self.id,
rows,
flipped: self.flipped,
rotated: (self.rotated + 1) % 4,
// links: [self.links[3], self.links[0], self.links[1], self.links[2]],
}
}
fn mirror_vertical(self: &Self) -> Tile {
Tile {
id: self.id,
rows: self.rows.iter().cloned().rev().collect(),
flipped: !self.flipped,
rotated: 0,
// links: [self.links[2], self.links[1], self.links[0], self.links[3]],
}
}
// Builds all 8 variants of rotations + flippity.
fn make_variants(self: &Self) -> [Self; 8] {
let rot1 = self.rotate_cw();
let rot2 = rot1.rotate_cw();
let rot3 = rot2.rotate_cw();
let flip0 = self.mirror_vertical();
let flip1 = flip0.rotate_cw();
let flip2 = flip1.rotate_cw();
let flip3 = flip2.rotate_cw();
[self.clone(), rot1, rot2, rot3, flip0, flip1, flip2, flip3]
}
}
// All tiles and various helper structs.
#[derive(Debug)]
struct TileBag {
// Mapping from tile id to tile. These tiles will get rotated once we start linking them
// together.
tiles: HashMap<usize, Tile>,
// Mapping from edges to a list of Tiles, rotated such that the top edge is used as the key.
// Each Tile here repeats 8 times with all of its variants.
edges: HashMap<Line, Vec<Tile>>,
// Once we perform assembly, this is where we store the tiles, first rows, then columns.
assembled: Vec<Vec<Tile>>,
}
struct MergedTiles {
rows: Vec<u128>,
}
impl TileBag {
fn parse(input: &str) -> TileBag {
let tiles = input
.split("\n\n")
.map(|tile_lines| {
let t = Tile::parse(tile_lines);
(t.id, t)
})
.collect();
let mut out = TileBag {
tiles,
edges: HashMap::new(),
assembled: Vec::new(),
};
out.build_edges();
out
}
fn build_edges(self: &mut Self) {
for t in self.tiles.values() {
for tt in t.make_variants().iter() {
self.edges
.entry(tt.north_edge())
.or_insert_with(|| Vec::new())
.push(tt.clone());
}
}
}
// Counts how many entries we have in edge map ignoring given id.
fn count_edges(self: &Self, edge: &Line, id_to_ignore: usize) -> usize {
let mut cnt = 0;
for other_edge in self.edges.get(edge).unwrap() {
// Don't count ourselves.
if other_edge.id != id_to_ignore {
cnt += 1;
}
}
cnt
}
// Calculates how many other tile edges this given tile can link to.
fn linked_tiles(self: &Self, tile: &Tile) -> usize {
let mut cnt = 0;
for edge in tile.get_edges().iter() {
cnt += self.count_edges(edge, tile.id);
}
cnt
}
// Finds corner tiles - tiles with only two other tiles linked.
fn find_corners(self: &Self) -> Vec<Tile> {
let corners = self
.tiles
.values()
.filter_map(|t| {
if self.linked_tiles(t) == 2 {
Some(t.clone())
} else {
None
}
})
.collect::<Vec<Tile>>();
assert_eq!(corners.len(), 4);
corners
}
fn orient_starting_tile(self: &Self, tile: &Tile) -> Tile {
for t in tile.make_variants().iter() {
if self.count_edges(&t.north_edge(), tile.id) == 0
&& self.count_edges(&t.west_edge(), tile.id) == 0
{
return t.clone();
}
}
panic!();
}
fn get_tile_for_edge(self: &Self, edge: Line, id_to_ignore: usize) -> Option<Tile> {
let edge_tiles = self.edges.get(&edge).unwrap();
if edge_tiles.len() != 2 {
return None;
}
for tile in edge_tiles.iter() {
if tile.id != id_to_ignore {
return Some(tile.clone());
}
}
panic!("shouldn't get here");
}
// Finds a tile that matches given east edge, ignoring given tile (so to not match ourselves).
fn find_east_neighbor(self: &Self, tile: &Tile) -> Option<Tile> {
Some(
self.get_tile_for_edge(invert(tile.east_edge()), tile.id)?
.rotate_cw()
.rotate_cw()
.rotate_cw(),
)
}
// Finds a tile that matches given south edge, ignoring given tile (so to not match ourselves).
fn find_south_neighbor(self: &Self, tile: &Tile) -> Option<Tile> {
self.get_tile_for_edge(invert(tile.south_edge()), tile.id)
}
// Fills in .assembled with all tiles, rotating/flipping them as needed.
fn assemble(self: &mut Self) {
// Pick one of the corner tiles to start with. Doesn't matter which, so we'll pick the last
// one. Rotate the tile so that it is in top-left corner of the assembled picture (only
// east and south links are used).
let mut tile = self.orient_starting_tile(&self.find_corners().pop().unwrap());
loop {
self.assembled.push(Vec::new());
loop {
// println!("{}\n", tile);
self.assembled.last_mut().unwrap().push(tile.clone());
match self.find_east_neighbor(&tile) {
Some(t) => {
tile = t;
}
None => {
break;
}
}
}
// Go to next row. Find the south neighbor of the first tile from previous row.
match self.find_south_neighbor(&self.assembled.last().unwrap()[0]) {
Some(t) => {
tile = t;
}
None => {
break;
}
}
}
}
// Takes self.assembled and turns it into a giant quilt.
fn merge(self: &mut Self) -> MergedTiles {
const SIZE_INNER: usize = SIZE - 2;
let quilt_side_tiles = self.assembled.len();
| {
assert_eq!(line_to_string(1), ".........#");
assert_eq!(line_to_string(391), ".##....###");
} | identifier_body |
|
noise.go | to return
}
func (r returnErrReader) Read([]byte) (int, error) { return 0, r.err }
// Read is basically the same as controlbase.Conn.Read, but it first reads the
// "early payload" header from the server which may or may not be present,
// depending on the server.
func (c *noiseConn) Read(p []byte) (n int, err error) {
c.readHeaderOnce.Do(c.readHeader)
return c.reader.Read(p)
}
// readHeader reads the optional "early payload" from the server that arrives
// after the Noise handshake but before the HTTP/2 session begins.
//
// readHeader is responsible for reading the header (if present), initializing
// c.earlyPayload, closing c.earlyPayloadReady, and initializing c.reader for
// future reads.
func (c *noiseConn) readHeader() {
defer close(c.earlyPayloadReady)
setErr := func(err error) {
c.reader = returnErrReader{err}
c.earlyPayloadErr = err
}
var hdr [hdrLen]byte
if _, err := io.ReadFull(c.Conn, hdr[:]); err != nil {
setErr(err)
return
}
if string(hdr[:len(earlyPayloadMagic)]) != earlyPayloadMagic {
// No early payload. We have to return the 9 bytes read we already
// consumed.
c.reader = io.MultiReader(bytes.NewReader(hdr[:]), c.Conn)
return
}
epLen := binary.BigEndian.Uint32(hdr[len(earlyPayloadMagic):])
if epLen > 10<<20 {
setErr(errors.New("invalid early payload length"))
return
}
payBuf := make([]byte, epLen)
if _, err := io.ReadFull(c.Conn, payBuf); err != nil {
setErr(err)
return
}
if err := json.Unmarshal(payBuf, &c.earlyPayload); err != nil {
setErr(err)
return
}
c.reader = c.Conn
}
func (c *noiseConn) Close() error {
if err := c.Conn.Close(); err != nil {
return err
}
c.pool.connClosed(c.id)
return nil
}
// NoiseClient provides a http.Client to connect to tailcontrol over
// the ts2021 protocol.
type NoiseClient struct {
// Client is an HTTP client to talk to the coordination server.
// It automatically makes a new Noise connection as needed.
// It does not support node key proofs. To do that, call
// noiseClient.getConn instead to make a connection.
*http.Client
// h2t is the HTTP/2 transport we use a bit to create new
// *http2.ClientConns. We don't use its connection pool and we don't use its
// dialing. We use it for exactly one reason: its idle timeout that can only
// be configured via the HTTP/1 config. And then we call NewClientConn (with
// an existing Noise connection) on the http2.Transport which sets up an
// http2.ClientConn using that idle timeout from an http1.Transport.
h2t *http2.Transport
// sfDial ensures that two concurrent requests for a noise connection only
// produce one shared one between the two callers.
sfDial singleflight.Group[struct{}, *noiseConn]
dialer *tsdial.Dialer
dnsCache *dnscache.Resolver
privKey key.MachinePrivate
serverPubKey key.MachinePublic
host string // the host part of serverURL
httpPort string // the default port to call
httpsPort string // the fallback Noise-over-https port
// dialPlan optionally returns a ControlDialPlan previously received
// from the control server; either the function or the return value can
// be nil.
dialPlan func() *tailcfg.ControlDialPlan
logf logger.Logf
netMon *netmon.Monitor
// mu only protects the following variables.
mu sync.Mutex
last *noiseConn // or nil
nextID int
connPool map[int]*noiseConn // active connections not yet closed; see noiseConn.Close
}
// NoiseOpts contains options for the NewNoiseClient function. All fields are
// required unless otherwise specified.
type NoiseOpts struct {
// PrivKey is this node's private key.
PrivKey key.MachinePrivate
// ServerPubKey is the public key of the server.
ServerPubKey key.MachinePublic
// ServerURL is the URL of the server to connect to.
ServerURL string
// Dialer's SystemDial function is used to connect to the server.
Dialer *tsdial.Dialer
// DNSCache is the caching Resolver to use to connect to the server.
//
// This field can be nil.
DNSCache *dnscache.Resolver
// Logf is the log function to use. This field can be nil.
Logf logger.Logf
// NetMon is the network monitor that, if set, will be used to get the
// network interface state. This field can be nil; if so, the current
// state will be looked up dynamically.
NetMon *netmon.Monitor
// DialPlan, if set, is a function that should return an explicit plan
// on how to connect to the server.
DialPlan func() *tailcfg.ControlDialPlan
}
// NewNoiseClient returns a new noiseClient for the provided server and machine key.
// serverURL is of the form https://<host>:<port> (no trailing slash).
//
// netMon may be nil; if non-nil, it's used to do faster interface lookups.
// dialPlan may be nil.
func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) {
u, err := url.Parse(opts.ServerURL)
if err != nil {
return nil, err
}
var httpPort string
var httpsPort string
if u.Port() != "" {
// If there is an explicit port specified, trust the scheme and hope for the best
if u.Scheme == "http" {
httpPort = u.Port()
httpsPort = "443"
} else {
httpPort = "80"
httpsPort = u.Port()
}
} else {
// Otherwise, use the standard ports
httpPort = "80"
httpsPort = "443"
}
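// Hedged examples of the port selection above (hostnames are hypothetical):
//   https://controlplane.example.com      -> httpPort "80",    httpsPort "443"
//   https://controlplane.example.com:8443 -> httpPort "80",    httpsPort "8443"
//   http://localhost:31544                -> httpPort "31544", httpsPort "443"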
np := &NoiseClient{
serverPubKey: opts.ServerPubKey,
privKey: opts.PrivKey,
host: u.Hostname(),
httpPort: httpPort,
httpsPort: httpsPort,
dialer: opts.Dialer,
dnsCache: opts.DNSCache,
dialPlan: opts.DialPlan,
logf: opts.Logf,
netMon: opts.NetMon,
}
// Create the HTTP/2 Transport using a net/http.Transport
// (which only does HTTP/1) because it's the only way to
// configure certain properties on the http2.Transport. But we
// never actually use the net/http.Transport for any HTTP/1
// requests.
h2Transport, err := http2.ConfigureTransports(&http.Transport{
IdleConnTimeout: time.Minute,
})
if err != nil {
return nil, err
}
np.h2t = h2Transport
np.Client = &http.Client{Transport: np}
return np, nil
}
// GetSingleUseRoundTripper returns a RoundTripper that can only be used once
// (and must be used once) to make a single HTTP request over the noise channel
// to the coordination server.
//
// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise
// payload, if any.
func (nc *NoiseClient) GetSingleUseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) {
for tries := 0; tries < 3; tries++ |
return nil, nil, errors.New("[unexpected] failed to reserve a request on a connection")
}
// contextErr is an error that wraps another error and is used to indicate that
// the error was because a context expired.
type contextErr struct {
err error
}
func (e contextErr) Error() string {
return e.err.Error()
}
func (e contextErr) Unwrap() error {
return e.err
}
// getConn returns a noiseConn that can be used to make requests to the
// coordination server. It may return a cached connection or create a new one.
// Dials are singleflighted, so concurrent calls to getConn may only dial once.
// As such, context values may not be respected as there are no guarantees that
// the context passed to getConn is the same as the context passed to dial.
func (nc *NoiseClient) getConn(ctx context.Context) (*noiseConn, error) {
nc.mu.Lock()
if last := nc.last; last | {
conn, err := nc.getConn(ctx)
if err != nil {
return nil, nil, err
}
earlyPayloadMaybeNil, err := conn.getEarlyPayload(ctx)
if err != nil {
return nil, nil, err
}
if conn.h2cc.ReserveNewRequest() {
return conn, earlyPayloadMaybeNil, nil
}
} | conditional_block |
noise.go | if set, is a function that should return an explicit plan
// on how to connect to the server.
DialPlan func() *tailcfg.ControlDialPlan
}
// NewNoiseClient returns a new noiseClient for the provided server and machine key.
// serverURL is of the form https://<host>:<port> (no trailing slash).
//
// netMon may be nil; if non-nil, it's used to do faster interface lookups.
// dialPlan may be nil.
func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) {
u, err := url.Parse(opts.ServerURL)
if err != nil {
return nil, err
}
var httpPort string
var httpsPort string
if u.Port() != "" {
// If there is an explicit port specified, trust the scheme and hope for the best
if u.Scheme == "http" {
httpPort = u.Port()
httpsPort = "443"
} else {
httpPort = "80"
httpsPort = u.Port()
}
} else {
// Otherwise, use the standard ports
httpPort = "80"
httpsPort = "443"
}
np := &NoiseClient{
serverPubKey: opts.ServerPubKey,
privKey: opts.PrivKey,
host: u.Hostname(),
httpPort: httpPort,
httpsPort: httpsPort,
dialer: opts.Dialer,
dnsCache: opts.DNSCache,
dialPlan: opts.DialPlan,
logf: opts.Logf,
netMon: opts.NetMon,
}
// Create the HTTP/2 Transport using a net/http.Transport
// (which only does HTTP/1) because it's the only way to
// configure certain properties on the http2.Transport. But we
// never actually use the net/http.Transport for any HTTP/1
// requests.
h2Transport, err := http2.ConfigureTransports(&http.Transport{
IdleConnTimeout: time.Minute,
})
if err != nil {
return nil, err
}
np.h2t = h2Transport
np.Client = &http.Client{Transport: np}
return np, nil
}
// GetSingleUseRoundTripper returns a RoundTripper that can only be used once
// (and must be used once) to make a single HTTP request over the noise channel
// to the coordination server.
//
// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise
// payload, if any.
func (nc *NoiseClient) GetSingleUseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) {
for tries := 0; tries < 3; tries++ {
conn, err := nc.getConn(ctx)
if err != nil {
return nil, nil, err
}
earlyPayloadMaybeNil, err := conn.getEarlyPayload(ctx)
if err != nil {
return nil, nil, err
}
if conn.h2cc.ReserveNewRequest() {
return conn, earlyPayloadMaybeNil, nil
}
}
return nil, nil, errors.New("[unexpected] failed to reserve a request on a connection")
}
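// Hedged usage sketch (the request path and body are hypothetical; the real
// callers live elsewhere in the control client):
//
//	rt, earlyNoise, err := nc.GetSingleUseRoundTripper(ctx)
//	if err != nil {
//		return err
//	}
//	_ = earlyNoise // nil if the server sent no early payload
//	req, _ := http.NewRequestWithContext(ctx, "POST", "https://example.invalid/some/endpoint", body)
//	resp, err := rt.RoundTrip(req)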
// contextErr is an error that wraps another error and is used to indicate that
// the error was because a context expired.
type contextErr struct {
err error
}
func (e contextErr) Error() string {
return e.err.Error()
}
func (e contextErr) Unwrap() error {
return e.err
}
// getConn returns a noiseConn that can be used to make requests to the
// coordination server. It may return a cached connection or create a new one.
// Dials are singleflighted, so concurrent calls to getConn may only dial once.
// As such, context values may not be respected as there are no guarantees that
// the context passed to getConn is the same as the context passed to dial.
func (nc *NoiseClient) getConn(ctx context.Context) (*noiseConn, error) {
nc.mu.Lock()
if last := nc.last; last != nil && last.canTakeNewRequest() {
nc.mu.Unlock()
return last, nil
}
nc.mu.Unlock()
for {
// We singleflight the dial to avoid making multiple connections; however,
// that means that we can't simply cancel the dial if the context is
// canceled. Instead, we have to additionally check that the context
// which was canceled is our context and retry if our context is still
// valid.
conn, err, _ := nc.sfDial.Do(struct{}{}, func() (*noiseConn, error) {
c, err := nc.dial(ctx)
if err != nil {
if ctx.Err() != nil {
return nil, contextErr{ctx.Err()}
}
return nil, err
}
return c, nil
})
var ce contextErr
if err == nil || !errors.As(err, &ce) {
return conn, err
}
if ctx.Err() == nil {
// The dial failed because of a context error, but our context
// is still valid. Retry.
continue
}
// The dial failed because our context was canceled. Return the
// underlying error.
return nil, ce.Unwrap()
}
}
func (nc *NoiseClient) RoundTrip(req *http.Request) (*http.Response, error) {
ctx := req.Context()
conn, err := nc.getConn(ctx)
if err != nil {
return nil, err
}
return conn.RoundTrip(req)
}
// connClosed removes the connection with the provided ID from the pool
// of active connections.
func (nc *NoiseClient) connClosed(id int) {
nc.mu.Lock()
defer nc.mu.Unlock()
conn := nc.connPool[id]
if conn != nil {
delete(nc.connPool, id)
if nc.last == conn {
nc.last = nil
}
}
}
// Close closes all the underlying noise connections.
// It is a no-op and returns nil if the connection is already closed.
func (nc *NoiseClient) Close() error {
nc.mu.Lock()
conns := nc.connPool
nc.connPool = nil
nc.mu.Unlock()
var errors []error
for _, c := range conns {
if err := c.Close(); err != nil {
errors = append(errors, err)
}
}
return multierr.New(errors...)
}
// dial opens a new connection to tailcontrol, fetching the server noise key
// if not cached.
func (nc *NoiseClient) dial(ctx context.Context) (*noiseConn, error) {
nc.mu.Lock()
connID := nc.nextID
nc.nextID++
nc.mu.Unlock()
if tailcfg.CurrentCapabilityVersion > math.MaxUint16 {
// Panic, because a test should have started failing several
// thousand version numbers before getting to this point.
panic("capability version is too high to fit in the wire protocol")
}
var dialPlan *tailcfg.ControlDialPlan
if nc.dialPlan != nil {
dialPlan = nc.dialPlan()
}
// If we have a dial plan, then set our timeout as slightly longer than
// the maximum amount of time contained therein; we assume that
// explicit instructions on timeouts are more useful than a single
// hard-coded timeout.
//
// The default value of 5 is chosen so that, when there's no dial plan,
// we retain the previous behaviour of 10 seconds end-to-end timeout.
timeoutSec := 5.0
if dialPlan != nil {
for _, c := range dialPlan.Candidates {
if v := c.DialStartDelaySec + c.DialTimeoutSec; v > timeoutSec {
timeoutSec = v
}
}
}
// After we establish a connection, we need some time to actually
// upgrade it into a Noise connection. With a ballpark worst-case RTT
// of 1000ms, give ourselves an extra 5 seconds to complete the
// handshake.
timeoutSec += 5
// Be extremely defensive and ensure that the timeout is in the range
// [5, 60] seconds (e.g. if we accidentally get a negative number).
if timeoutSec > 60 {
timeoutSec = 60
} else if timeoutSec < 5 {
timeoutSec = 5
}
timeout := time.Duration(timeoutSec * float64(time.Second))
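// Hedged example of the arithmetic above: a plan whose slowest candidate has
// DialStartDelaySec=2 and DialTimeoutSec=10 yields timeoutSec = 12 + 5 = 17
// (a 17s overall timeout); with no dial plan it stays at 5 + 5 = 10s.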
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
clientConn, err := (&controlhttp.Dialer{
Hostname: nc.host,
HTTPPort: nc.httpPort,
HTTPSPort: nc.httpsPort,
MachineKey: nc.privKey,
ControlKey: nc.serverPubKey,
ProtocolVersion: uint16(tailcfg.CurrentCapabilityVersion),
Dialer: nc.dialer.SystemDial,
DNSCache: nc.dnsCache,
DialPlan: dialPlan,
Logf: nc.logf,
NetMon: nc.netMon,
Clock: tstime.StdClock{}, | }).Dial(ctx)
if err != nil {
return nil, err | random_line_split |
|
noise.go |
// The first 9 bytes from the server to client over Noise are either an HTTP/2
// settings frame (a normal HTTP/2 setup) or, as we added later, an "early payload"
// header that's also 9 bytes long: 5 bytes (earlyPayloadMagic) followed by 4 bytes
// of length. Then that many bytes of JSON-encoded tailcfg.EarlyNoise.
// The early payload is optional. Some servers may not send it.
const (
hdrLen = 9 // http2 frame header size; also size of our early payload size header
earlyPayloadMagic = "\xff\xff\xffTS"
)
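// Hedged sketch of the sending side of the framing described above (the real
// encoder lives in the control server; this helper name is made up): 5 magic
// bytes, a big-endian uint32 length, then the JSON-encoded tailcfg.EarlyNoise.
func appendEarlyPayloadHeader(dst, payloadJSON []byte) []byte {
	dst = append(dst, earlyPayloadMagic...)
	var lenBuf [4]byte
	binary.BigEndian.PutUint32(lenBuf[:], uint32(len(payloadJSON)))
	dst = append(dst, lenBuf[:]...)
	return append(dst, payloadJSON...)
}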
// returnErrReader is an io.Reader that always returns an error.
type returnErrReader struct {
err error // the error to return
}
func (r returnErrReader) Read([]byte) (int, error) { return 0, r.err }
// Read is basically the same as controlbase.Conn.Read, but it first reads the
// "early payload" header from the server which may or may not be present,
// depending on the server.
func (c *noiseConn) Read(p []byte) (n int, err error) {
c.readHeaderOnce.Do(c.readHeader)
return c.reader.Read(p)
}
// readHeader reads the optional "early payload" from the server that arrives
// after the Noise handshake but before the HTTP/2 session begins.
//
// readHeader is responsible for reading the header (if present), initializing
// c.earlyPayload, closing c.earlyPayloadReady, and initializing c.reader for
// future reads.
func (c *noiseConn) readHeader() {
defer close(c.earlyPayloadReady)
setErr := func(err error) {
c.reader = returnErrReader{err}
c.earlyPayloadErr = err
}
var hdr [hdrLen]byte
if _, err := io.ReadFull(c.Conn, hdr[:]); err != nil {
setErr(err)
return
}
if string(hdr[:len(earlyPayloadMagic)]) != earlyPayloadMagic {
// No early payload. We have to return the 9 bytes read we already
// consumed.
c.reader = io.MultiReader(bytes.NewReader(hdr[:]), c.Conn)
return
}
epLen := binary.BigEndian.Uint32(hdr[len(earlyPayloadMagic):])
if epLen > 10<<20 {
setErr(errors.New("invalid early payload length"))
return
}
payBuf := make([]byte, epLen)
if _, err := io.ReadFull(c.Conn, payBuf); err != nil {
setErr(err)
return
}
if err := json.Unmarshal(payBuf, &c.earlyPayload); err != nil {
setErr(err)
return
}
c.reader = c.Conn
}
func (c *noiseConn) Close() error {
if err := c.Conn.Close(); err != nil {
return err
}
c.pool.connClosed(c.id)
return nil
}
// NoiseClient provides a http.Client to connect to tailcontrol over
// the ts2021 protocol.
type NoiseClient struct {
// Client is an HTTP client to talk to the coordination server.
// It automatically makes a new Noise connection as needed.
// It does not support node key proofs. To do that, call
// noiseClient.getConn instead to make a connection.
*http.Client
// h2t is the HTTP/2 transport we use a bit to create new
// *http2.ClientConns. We don't use its connection pool and we don't use its
// dialing. We use it for exactly one reason: its idle timeout that can only
// be configured via the HTTP/1 config. And then we call NewClientConn (with
// an existing Noise connection) on the http2.Transport which sets up an
// http2.ClientConn using that idle timeout from an http1.Transport.
h2t *http2.Transport
// sfDial ensures that two concurrent requests for a noise connection only
// produce one shared one between the two callers.
sfDial singleflight.Group[struct{}, *noiseConn]
dialer *tsdial.Dialer
dnsCache *dnscache.Resolver
privKey key.MachinePrivate
serverPubKey key.MachinePublic
host string // the host part of serverURL
httpPort string // the default port to call
httpsPort string // the fallback Noise-over-https port
// dialPlan optionally returns a ControlDialPlan previously received
// from the control server; either the function or the return value can
// be nil.
dialPlan func() *tailcfg.ControlDialPlan
logf logger.Logf
netMon *netmon.Monitor
// mu only protects the following variables.
mu sync.Mutex
last *noiseConn // or nil
nextID int
connPool map[int]*noiseConn // active connections not yet closed; see noiseConn.Close
}
// NoiseOpts contains options for the NewNoiseClient function. All fields are
// required unless otherwise specified.
type NoiseOpts struct {
// PrivKey is this node's private key.
PrivKey key.MachinePrivate
// ServerPubKey is the public key of the server.
ServerPubKey key.MachinePublic
// ServerURL is the URL of the server to connect to.
ServerURL string
// Dialer's SystemDial function is used to connect to the server.
Dialer *tsdial.Dialer
// DNSCache is the caching Resolver to use to connect to the server.
//
// This field can be nil.
DNSCache *dnscache.Resolver
// Logf is the log function to use. This field can be nil.
Logf logger.Logf
// NetMon is the network monitor that, if set, will be used to get the
// network interface state. This field can be nil; if so, the current
// state will be looked up dynamically.
NetMon *netmon.Monitor
// DialPlan, if set, is a function that should return an explicit plan
// on how to connect to the server.
DialPlan func() *tailcfg.ControlDialPlan
}
// NewNoiseClient returns a new noiseClient for the provided server and machine key.
// serverURL is of the form https://<host>:<port> (no trailing slash).
//
// netMon may be nil; if non-nil, it's used to do faster interface lookups.
// dialPlan may be nil.
func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) {
u, err := url.Parse(opts.ServerURL)
if err != nil {
return nil, err
}
var httpPort string
var httpsPort string
if u.Port() != "" {
// If there is an explicit port specified, trust the scheme and hope for the best
if u.Scheme == "http" {
httpPort = u.Port()
httpsPort = "443"
} else {
httpPort = "80"
httpsPort = u.Port()
}
} else {
// Otherwise, use the standard ports
httpPort = "80"
httpsPort = "443"
}
np := &NoiseClient{
serverPubKey: opts.ServerPubKey,
privKey: opts.PrivKey,
host: u.Hostname(),
httpPort: httpPort,
httpsPort: httpsPort,
dialer: opts.Dialer,
dnsCache: opts.DNSCache,
dialPlan: opts.DialPlan,
logf: opts.Logf,
netMon: opts.NetMon,
}
// Create the HTTP/2 Transport using a net/http.Transport
// (which only does HTTP/1) because it's the only way to
// configure certain properties on the http2.Transport. But we
// never actually use the net/http.Transport for any HTTP/1
// requests.
h2Transport, err := http2.ConfigureTransports(&http.Transport{
IdleConnTimeout: time.Minute,
})
if err != nil {
return nil, err
}
np.h2t = h2Transport
np.Client = &http.Client{Transport: np}
return np, nil
}
// GetSingleUseRoundTripper returns a RoundTripper that can only be used once
// (and must be used once) to make a single HTTP request over the noise channel
// to the coordination server.
//
// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise
// payload, if any.
func (nc *NoiseClient) GetSingleUseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) {
for tries := 0; tries < 3; tries++ {
conn, err := nc.getConn(ctx)
if err != nil {
return nil, nil, err
}
earlyPayloadMaybeNil, err := conn.getEarlyPayload(ctx)
if err != nil {
return nil, nil, err
}
if conn.h2cc.ReserveNewRequest() {
return conn, earlyPayloadMaybeNil, nil
}
}
return nil, nil, errors | {
select {
case <-c.earlyPayloadReady:
return c.earlyPayload, c.earlyPayloadErr
case <-ctx.Done():
return nil, ctx.Err()
}
} | identifier_body |
|
noise.go | to return
}
func (r returnErrReader) Read([]byte) (int, error) { return 0, r.err }
// Read is basically the same as controlbase.Conn.Read, but it first reads the
// "early payload" header from the server which may or may not be present,
// depending on the server.
func (c *noiseConn) Read(p []byte) (n int, err error) {
c.readHeaderOnce.Do(c.readHeader)
return c.reader.Read(p)
}
// readHeader reads the optional "early payload" from the server that arrives
// after the Noise handshake but before the HTTP/2 session begins.
//
// readHeader is responsible for reading the header (if present), initializing
// c.earlyPayload, closing c.earlyPayloadReady, and initializing c.reader for
// future reads.
func (c *noiseConn) readHeader() {
defer close(c.earlyPayloadReady)
setErr := func(err error) {
c.reader = returnErrReader{err}
c.earlyPayloadErr = err
}
var hdr [hdrLen]byte
if _, err := io.ReadFull(c.Conn, hdr[:]); err != nil {
setErr(err)
return
}
if string(hdr[:len(earlyPayloadMagic)]) != earlyPayloadMagic {
// No early payload. We have to return the 9 bytes read we already
// consumed.
c.reader = io.MultiReader(bytes.NewReader(hdr[:]), c.Conn)
return
}
epLen := binary.BigEndian.Uint32(hdr[len(earlyPayloadMagic):])
if epLen > 10<<20 {
setErr(errors.New("invalid early payload length"))
return
}
payBuf := make([]byte, epLen)
if _, err := io.ReadFull(c.Conn, payBuf); err != nil {
setErr(err)
return
}
if err := json.Unmarshal(payBuf, &c.earlyPayload); err != nil {
setErr(err)
return
}
c.reader = c.Conn
}
func (c *noiseConn) | () error {
if err := c.Conn.Close(); err != nil {
return err
}
c.pool.connClosed(c.id)
return nil
}
// NoiseClient provides a http.Client to connect to tailcontrol over
// the ts2021 protocol.
type NoiseClient struct {
// Client is an HTTP client to talk to the coordination server.
// It automatically makes a new Noise connection as needed.
// It does not support node key proofs. To do that, call
// noiseClient.getConn instead to make a connection.
*http.Client
// h2t is the HTTP/2 transport we use a bit to create new
// *http2.ClientConns. We don't use its connection pool and we don't use its
// dialing. We use it for exactly one reason: its idle timeout that can only
// be configured via the HTTP/1 config. And then we call NewClientConn (with
// an existing Noise connection) on the http2.Transport which sets up an
// http2.ClientConn using that idle timeout from an http1.Transport.
h2t *http2.Transport
// sfDial ensures that two concurrent requests for a noise connection only
// produce one shared one between the two callers.
sfDial singleflight.Group[struct{}, *noiseConn]
dialer *tsdial.Dialer
dnsCache *dnscache.Resolver
privKey key.MachinePrivate
serverPubKey key.MachinePublic
host string // the host part of serverURL
httpPort string // the default port to call
httpsPort string // the fallback Noise-over-https port
// dialPlan optionally returns a ControlDialPlan previously received
// from the control server; either the function or the return value can
// be nil.
dialPlan func() *tailcfg.ControlDialPlan
logf logger.Logf
netMon *netmon.Monitor
// mu only protects the following variables.
mu sync.Mutex
last *noiseConn // or nil
nextID int
connPool map[int]*noiseConn // active connections not yet closed; see noiseConn.Close
}
// NoiseOpts contains options for the NewNoiseClient function. All fields are
// required unless otherwise specified.
type NoiseOpts struct {
// PrivKey is this node's private key.
PrivKey key.MachinePrivate
// ServerPubKey is the public key of the server.
ServerPubKey key.MachinePublic
// ServerURL is the URL of the server to connect to.
ServerURL string
// Dialer's SystemDial function is used to connect to the server.
Dialer *tsdial.Dialer
// DNSCache is the caching Resolver to use to connect to the server.
//
// This field can be nil.
DNSCache *dnscache.Resolver
// Logf is the log function to use. This field can be nil.
Logf logger.Logf
// NetMon is the network monitor that, if set, will be used to get the
// network interface state. This field can be nil; if so, the current
// state will be looked up dynamically.
NetMon *netmon.Monitor
// DialPlan, if set, is a function that should return an explicit plan
// on how to connect to the server.
DialPlan func() *tailcfg.ControlDialPlan
}
// NewNoiseClient returns a new noiseClient for the provided server and machine key.
// serverURL is of the form https://<host>:<port> (no trailing slash).
//
// netMon may be nil; if non-nil, it's used to do faster interface lookups.
// dialPlan may be nil.
func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) {
u, err := url.Parse(opts.ServerURL)
if err != nil {
return nil, err
}
var httpPort string
var httpsPort string
if u.Port() != "" {
// If there is an explicit port specified, trust the scheme and hope for the best
if u.Scheme == "http" {
httpPort = u.Port()
httpsPort = "443"
} else {
httpPort = "80"
httpsPort = u.Port()
}
} else {
// Otherwise, use the standard ports
httpPort = "80"
httpsPort = "443"
}
np := &NoiseClient{
serverPubKey: opts.ServerPubKey,
privKey: opts.PrivKey,
host: u.Hostname(),
httpPort: httpPort,
httpsPort: httpsPort,
dialer: opts.Dialer,
dnsCache: opts.DNSCache,
dialPlan: opts.DialPlan,
logf: opts.Logf,
netMon: opts.NetMon,
}
// Create the HTTP/2 Transport using a net/http.Transport
// (which only does HTTP/1) because it's the only way to
// configure certain properties on the http2.Transport. But we
// never actually use the net/http.Transport for any HTTP/1
// requests.
h2Transport, err := http2.ConfigureTransports(&http.Transport{
IdleConnTimeout: time.Minute,
})
if err != nil {
return nil, err
}
np.h2t = h2Transport
np.Client = &http.Client{Transport: np}
return np, nil
}
// GetSingleUseRoundTripper returns a RoundTripper that can only be used once
// (and must be used once) to make a single HTTP request over the noise channel
// to the coordination server.
//
// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise
// payload, if any.
func (nc *NoiseClient) GetSingleUseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) {
for tries := 0; tries < 3; tries++ {
conn, err := nc.getConn(ctx)
if err != nil {
return nil, nil, err
}
earlyPayloadMaybeNil, err := conn.getEarlyPayload(ctx)
if err != nil {
return nil, nil, err
}
if conn.h2cc.ReserveNewRequest() {
return conn, earlyPayloadMaybeNil, nil
}
}
return nil, nil, errors.New("[unexpected] failed to reserve a request on a connection")
}
// contextErr is an error that wraps another error and is used to indicate that
// the error was because a context expired.
type contextErr struct {
err error
}
func (e contextErr) Error() string {
return e.err.Error()
}
func (e contextErr) Unwrap() error {
return e.err
}
// getConn returns a noiseConn that can be used to make requests to the
// coordination server. It may return a cached connection or create a new one.
// Dials are singleflighted, so concurrent calls to getConn may only dial once.
// As such, context values may not be respected as there are no guarantees that
// the context passed to getConn is the same as the context passed to dial.
func (nc *NoiseClient) getConn(ctx context.Context) (*noiseConn, error) {
nc.mu.Lock()
if last := nc.last; last != | Close | identifier_name |
write.rs | // does, and are populated by LLVM's default PassManagerBuilder.
// Each manager has a different set of passes, but they also share
// some common passes.
let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
let mpm = llvm::LLVMCreatePassManager();
{
let find_pass = |pass_name: &str| {
let pass_name = SmallCStr::new(pass_name);
llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr())
};
if config.verify_llvm_ir {
// Verification should run as the very first pass.
llvm::LLVMRustAddPass(fpm, find_pass("verify").unwrap());
}
let mut extra_passes = Vec::new();
let mut have_name_anon_globals_pass = false;
for pass_name in &config.passes {
if pass_name == "lint" {
// Linting should also be performed early, directly on the generated IR.
llvm::LLVMRustAddPass(fpm, find_pass("lint").unwrap());
continue;
}
if let Some(pass) = find_pass(pass_name) {
extra_passes.push(pass);
} else {
diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass_name));
}
if pass_name == "name-anon-globals" {
have_name_anon_globals_pass = true;
}
}
for pass_name in &cgcx.plugin_passes {
if let Some(pass) = find_pass(pass_name) {
extra_passes.push(pass);
} else {
diag_handler.err(&format!("a plugin asked for LLVM pass \
`{}` but LLVM does not \
recognize it", pass_name));
}
if pass_name == "name-anon-globals" {
have_name_anon_globals_pass = true;
}
}
// Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need
// to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise
// we'll get errors in LLVM.
let using_thin_buffers = config.bitcode_needed();
if !config.no_prepopulate_passes {
llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod);
llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod);
let opt_level = config.opt_level.map(|x| to_llvm_opt_settings(x).0)
.unwrap_or(llvm::CodeGenOptLevel::None);
let prepare_for_thin_lto = cgcx.lto == Lto::Thin || cgcx.lto == Lto::ThinLocal ||
(cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled());
with_llvm_pmb(llmod, &config, opt_level, prepare_for_thin_lto, &mut |b| {
llvm::LLVMRustAddLastExtensionPasses(
b, extra_passes.as_ptr(), extra_passes.len() as size_t);
llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm);
llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm);
});
have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto;
if using_thin_buffers && !prepare_for_thin_lto {
llvm::LLVMRustAddPass(mpm, find_pass("name-anon-globals").unwrap());
have_name_anon_globals_pass = true;
}
} else {
// If we don't use the standard pipeline, directly populate the MPM
// with the extra passes.
for pass in extra_passes {
llvm::LLVMRustAddPass(mpm, pass);
}
}
if using_thin_buffers && !have_name_anon_globals_pass {
// As described above, this will probably cause an error in LLVM
if config.no_prepopulate_passes {
diag_handler.err("The current compilation is going to use thin LTO buffers \
without running LLVM's NameAnonGlobals pass. \
This will likely cause errors in LLVM. Consider adding \
-C passes=name-anon-globals to the compiler command line.");
} else {
bug!("We are using thin LTO buffers without running the NameAnonGlobals pass. \
This will likely cause errors in LLVM and should never happen.");
}
}
}
diag_handler.abort_if_errors();
// Finally, run the actual optimization passes
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize_function_passes");
time_ext(config.time_passes,
&format!("llvm function passes [{}]", module_name.unwrap()),
|| {
llvm::LLVMRustRunFunctionPassManager(fpm, llmod)
});
}
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize_module_passes");
time_ext(config.time_passes,
&format!("llvm module passes [{}]", module_name.unwrap()),
|| {
llvm::LLVMRunPassManager(mpm, llmod)
});
}
// Deallocate managers that we're now done with
llvm::LLVMDisposePassManager(fpm);
llvm::LLVMDisposePassManager(mpm);
}
Ok(())
}
pub(crate) unsafe fn codegen(cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
module: ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig)
-> Result<CompiledModule, FatalError>
{
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen");
{
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
let tm = &*module.module_llvm.tm;
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
if cgcx.msvc_imps_needed {
create_msvc_imps(cgcx, llcx, llmod);
}
// A codegen-specific pass manager is used to generate object
// files for an LLVM module.
//
// Apparently each of these pass managers is a one-shot kind of
// thing, so we create a new one for each type of output. The
// pass manager passed to the closure should be ensured to not
// escape the closure itself, and the manager should only be
// used once.
unsafe fn with_codegen<'ll, F, R>(tm: &'ll llvm::TargetMachine,
llmod: &'ll llvm::Module,
no_builtins: bool,
f: F) -> R
where F: FnOnce(&'ll mut PassManager<'ll>) -> R,
{
let cpm = llvm::LLVMCreatePassManager();
llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod);
llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
f(cpm)
}
// If we don't have the integrated assembler, then we need to emit asm
// from LLVM and use `gcc` to create the object file.
let asm_to_obj = config.emit_obj && config.no_integrated_as;
// Change what we write and cleanup based on whether obj files are
// just llvm bitcode. In that case write bitcode, and possibly
// delete the bitcode if it wasn't requested. Don't generate the
// machine code, instead copy the .o file from the .bc
let write_bc = config.emit_bc || config.obj_is_bitcode;
let rm_bc = !config.emit_bc && config.obj_is_bitcode;
let write_obj = config.emit_obj && !config.obj_is_bitcode && !asm_to_obj;
let copy_bc_to_obj = config.emit_obj && config.obj_is_bitcode;
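// Hedged illustration of the combinations above: plain `--emit=obj` leaves
// only write_obj set; when obj_is_bitcode is true (bitcode stands in for the
// object file, e.g. for linker-plugin LTO), write_bc and copy_bc_to_obj are
// set instead, write_obj is false, and rm_bc removes the .bc afterwards
// unless bitcode output was itself requested.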
let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
if write_bc || config.emit_bc_compressed || config.embed_bitcode {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_make_bitcode");
let thin = ThinBuffer::new(llmod);
let data = thin.data();
if write_bc {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_bitcode");
if let Err(e) = fs::write(&bc_out, data) {
let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
diag_handler.err(&msg);
}
}
if config.embed_bitcode {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_embed_bitcode");
embed_bitcode(cgcx, llcx, llmod, Some(data));
}
if config.emit_bc_compressed {
let _timer =
cgcx.prof.generic_activity("LLVM_module_codegen_emit_compressed_bitcode");
let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION);
let data = bytecode::encode(&module.name, data);
if let Err(e) = fs::write(&dst, data) { | let msg = format!("failed to write bytecode to {}: {}", dst.display(), e); | random_line_split |
|
write.rs | })
}
pub(crate) fn save_temp_bitcode(
cgcx: &CodegenContext<LlvmCodegenBackend>,
module: &ModuleCodegen<ModuleLlvm>,
name: &str
) {
if !cgcx.save_temps {
return
}
unsafe {
let ext = format!("{}.bc", name);
let cgu = Some(&module.name[..]);
let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
let cstr = path_to_c_string(&path);
let llmod = module.module_llvm.llmod();
llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
}
}
pub struct DiagnosticHandlers<'a> {
data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a Handler),
llcx: &'a llvm::Context,
}
impl<'a> DiagnosticHandlers<'a> {
pub fn new(cgcx: &'a CodegenContext<LlvmCodegenBackend>,
handler: &'a Handler,
llcx: &'a llvm::Context) -> Self {
let data = Box::into_raw(Box::new((cgcx, handler)));
unsafe {
llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data.cast());
llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, data.cast());
}
DiagnosticHandlers { data, llcx }
}
}
impl<'a> Drop for DiagnosticHandlers<'a> {
fn drop(&mut self) {
use std::ptr::null_mut;
unsafe {
llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut());
llvm::LLVMContextSetDiagnosticHandler(self.llcx, diagnostic_handler, null_mut());
drop(Box::from_raw(self.data));
}
}
}
unsafe extern "C" fn | (cgcx: &CodegenContext<LlvmCodegenBackend>,
msg: &str,
cookie: c_uint) {
cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_owned());
}
unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic,
user: *const c_void,
cookie: c_uint) {
if user.is_null() {
return
}
let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
let msg = llvm::build_string(|s| llvm::LLVMRustWriteSMDiagnosticToString(diag, s))
.expect("non-UTF8 SMDiagnostic");
report_inline_asm(cgcx, &msg, cookie);
}
unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
if user.is_null() {
return
}
let (cgcx, diag_handler) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
match llvm::diagnostic::Diagnostic::unpack(info) {
llvm::diagnostic::InlineAsm(inline) => {
report_inline_asm(cgcx,
&llvm::twine_to_string(inline.message),
inline.cookie);
}
llvm::diagnostic::Optimization(opt) => {
let enabled = match cgcx.remark {
Passes::All => true,
Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name),
};
if enabled {
diag_handler.note_without_error(&format!("optimization {} for {} at {}:{}:{}: {}",
opt.kind.describe(),
opt.pass_name,
opt.filename,
opt.line,
opt.column,
opt.message));
}
}
llvm::diagnostic::PGO(diagnostic_ref) |
llvm::diagnostic::Linker(diagnostic_ref) => {
let msg = llvm::build_string(|s| {
llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
}).expect("non-UTF8 diagnostic");
diag_handler.warn(&msg);
}
llvm::diagnostic::UnknownDiagnostic(..) => {},
}
}
// Unsafe due to LLVM calls.
pub(crate) unsafe fn optimize(cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
module: &ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig)
-> Result<(), FatalError>
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize");
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
let tm = &*module.module_llvm.tm;
let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
if config.emit_no_opt_bc {
let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
let out = path_to_c_string(&out);
llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
}
if config.opt_level.is_some() {
// Create the two optimizing pass managers. These mirror what clang
// does, and are populated by LLVM's default PassManagerBuilder.
// Each manager has a different set of passes, but they also share
// some common passes.
let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
let mpm = llvm::LLVMCreatePassManager();
{
let find_pass = |pass_name: &str| {
let pass_name = SmallCStr::new(pass_name);
llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr())
};
if config.verify_llvm_ir {
// Verification should run as the very first pass.
llvm::LLVMRustAddPass(fpm, find_pass("verify").unwrap());
}
let mut extra_passes = Vec::new();
let mut have_name_anon_globals_pass = false;
for pass_name in &config.passes {
if pass_name == "lint" {
// Linting should also be performed early, directly on the generated IR.
llvm::LLVMRustAddPass(fpm, find_pass("lint").unwrap());
continue;
}
if let Some(pass) = find_pass(pass_name) {
extra_passes.push(pass);
} else {
diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass_name));
}
if pass_name == "name-anon-globals" {
have_name_anon_globals_pass = true;
}
}
for pass_name in &cgcx.plugin_passes {
if let Some(pass) = find_pass(pass_name) {
extra_passes.push(pass);
} else {
diag_handler.err(&format!("a plugin asked for LLVM pass \
`{}` but LLVM does not \
recognize it", pass_name));
}
if pass_name == "name-anon-globals" {
have_name_anon_globals_pass = true;
}
}
// Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need
// to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise
// we'll get errors in LLVM.
let using_thin_buffers = config.bitcode_needed();
if !config.no_prepopulate_passes {
llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod);
llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod);
let opt_level = config.opt_level.map(|x| to_llvm_opt_settings(x).0)
.unwrap_or(llvm::CodeGenOptLevel::None);
let prepare_for_thin_lto = cgcx.lto == Lto::Thin || cgcx.lto == Lto::ThinLocal ||
(cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled());
with_llvm_pmb(llmod, &config, opt_level, prepare_for_thin_lto, &mut |b| {
llvm::LLVMRustAddLastExtensionPasses(
b, extra_passes.as_ptr(), extra_passes.len() as size_t);
llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm);
llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm);
});
have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto;
if using_thin_buffers && !prepare_for_thin_lto {
llvm::LLVMRustAddPass(mpm, find_pass("name-anon-globals").unwrap());
have_name_anon_globals_pass = true;
}
} else {
// If we don't use the standard pipeline, directly populate the MPM
// with the extra passes.
for pass in extra_passes {
llvm::LLVMRustAddPass(mpm, pass);
}
}
if using_thin_buffers && !have_name_anon_globals_pass {
// As described above, this will probably cause an error in LLVM
if config.no_prepopulate_passes {
diag_handler.err("The current compilation is going to use thin LTO buffers \
without running LLVM's NameAnonGlobals pass. \
| report_inline_asm | identifier_name |
write.rs | might have to clone the
// module to produce the asm output
let llmod = if config.emit_obj {
llvm::LLVMCloneModule(llmod)
} else {
llmod
};
with_codegen(tm, llmod, config.no_builtins, |cpm| {
write_output_file(diag_handler, tm, cpm, llmod, &path,
llvm::FileType::AssemblyFile)
})?;
}
if write_obj {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_obj");
with_codegen(tm, llmod, config.no_builtins, |cpm| {
write_output_file(diag_handler, tm, cpm, llmod, &obj_out,
llvm::FileType::ObjectFile)
})?;
} else if asm_to_obj {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_asm_to_obj");
let assembly = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
run_assembler(cgcx, diag_handler, &assembly, &obj_out);
if !config.emit_asm && !cgcx.save_temps {
drop(fs::remove_file(&assembly));
}
}
Ok(())
})?;
if copy_bc_to_obj {
debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
if let Err(e) = link_or_copy(&bc_out, &obj_out) {
diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
}
}
if rm_bc {
debug!("removing_bitcode {:?}", bc_out);
if let Err(e) = fs::remove_file(&bc_out) {
diag_handler.err(&format!("failed to remove bitcode: {}", e));
}
}
drop(handlers);
}
Ok(module.into_compiled_module(config.emit_obj,
config.emit_bc,
config.emit_bc_compressed,
&cgcx.output_filenames))
}
/// Embed the bitcode of an LLVM module in the LLVM module itself.
///
/// This is done primarily for iOS where it appears to be standard to compile C
/// code at least with `-fembed-bitcode` which creates two sections in the
/// executable:
///
/// * __LLVM,__bitcode
/// * __LLVM,__cmdline
///
/// It appears *both* of these sections are necessary to get the linker to
/// recognize what's going on. For us though we just always throw in an empty
/// cmdline section.
///
/// Furthermore, debug/O1 builds don't actually embed bitcode but rather just
/// embed an empty section.
///
/// Basically all of this is us attempting to follow in the footsteps of clang
/// on iOS. See #35968 for lots more info.
unsafe fn embed_bitcode(cgcx: &CodegenContext<LlvmCodegenBackend>,
llcx: &llvm::Context,
llmod: &llvm::Module,
bitcode: Option<&[u8]>) {
let llconst = common::bytes_in_context(llcx, bitcode.unwrap_or(&[]));
let llglobal = llvm::LLVMAddGlobal(
llmod,
common::val_ty(llconst),
"rustc.embedded.module\0".as_ptr().cast(),
);
llvm::LLVMSetInitializer(llglobal, llconst);
let is_apple = cgcx.opts.target_triple.triple().contains("-ios") ||
cgcx.opts.target_triple.triple().contains("-darwin");
let section = if is_apple {
"__LLVM,__bitcode\0"
} else {
".llvmbc\0"
};
llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
let llconst = common::bytes_in_context(llcx, &[]);
let llglobal = llvm::LLVMAddGlobal(
llmod,
common::val_ty(llconst),
"rustc.embedded.cmdline\0".as_ptr().cast(),
);
llvm::LLVMSetInitializer(llglobal, llconst);
let section = if is_apple {
"__LLVM,__cmdline\0"
} else {
".llvmcmd\0"
};
llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
}
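// Sketch of how the result can be inspected (illustrative only; exact tool output varies
// by platform and toolchain). On Mach-O targets the object should gain the
// `__LLVM,__bitcode` / `__LLVM,__cmdline` sections, elsewhere `.llvmbc` / `.llvmcmd`:
//
//     otool -l foo.o | grep -A2 __bitcode     # Mach-O load commands
//     llvm-objdump -h foo.o | grep llvm       # ELF/COFF section headers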
pub unsafe fn with_llvm_pmb(llmod: &llvm::Module,
config: &ModuleConfig,
opt_level: llvm::CodeGenOptLevel,
prepare_for_thin_lto: bool,
f: &mut dyn FnMut(&llvm::PassManagerBuilder)) {
use std::ptr;
// Create the PassManagerBuilder for LLVM. We configure it with
// reasonable defaults and prepare it to actually populate the pass
// manager.
let builder = llvm::LLVMPassManagerBuilderCreate();
let opt_size = config.opt_size.map(|x| to_llvm_opt_settings(x).1)
.unwrap_or(llvm::CodeGenOptSizeNone);
let inline_threshold = config.inline_threshold;
let pgo_gen_path = match config.pgo_gen {
SwitchWithOptPath::Enabled(ref opt_dir_path) => {
let path = if let Some(dir_path) = opt_dir_path {
dir_path.join("default_%m.profraw")
} else {
PathBuf::from("default_%m.profraw")
};
Some(CString::new(format!("{}", path.display())).unwrap())
}
SwitchWithOptPath::Disabled => {
None
}
};
let pgo_use_path = config.pgo_use.as_ref().map(|path_buf| {
CString::new(path_buf.to_string_lossy().as_bytes()).unwrap()
});
llvm::LLVMRustConfigurePassManagerBuilder(
builder,
opt_level,
config.merge_functions,
config.vectorize_slp,
config.vectorize_loop,
prepare_for_thin_lto,
pgo_gen_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
pgo_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
);
llvm::LLVMPassManagerBuilderSetSizeLevel(builder, opt_size as u32);
if opt_size != llvm::CodeGenOptSizeNone {
llvm::LLVMPassManagerBuilderSetDisableUnrollLoops(builder, 1);
}
llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, config.no_builtins);
// Here we match what clang does (kinda). For O0 we only inline
// always-inline functions (but don't add lifetime intrinsics), at O1 we
// inline with lifetime intrinsics, and O2+ we add an inliner with a
// thresholds copied from clang.
match (opt_level, opt_size, inline_threshold) {
(.., Some(t)) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, t as u32);
}
(llvm::CodeGenOptLevel::Aggressive, ..) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 275);
}
(_, llvm::CodeGenOptSizeDefault, _) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 75);
}
(_, llvm::CodeGenOptSizeAggressive, _) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 25);
}
(llvm::CodeGenOptLevel::None, ..) => {
llvm::LLVMRustAddAlwaysInlinePass(builder, false);
}
(llvm::CodeGenOptLevel::Less, ..) => {
llvm::LLVMRustAddAlwaysInlinePass(builder, true);
}
(llvm::CodeGenOptLevel::Default, ..) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 225);
}
(llvm::CodeGenOptLevel::Other, ..) => {
bug!("CodeGenOptLevel::Other selected")
}
}
f(builder);
llvm::LLVMPassManagerBuilderDispose(builder);
}
// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
// This is required to satisfy `dllimport` references to static data in .rlibs
// when using the MSVC linker. We do this only for data, as the linker can fix up
// code references on its own.
// See #26591, #27438
fn create_msvc_imps(
cgcx: &CodegenContext<LlvmCodegenBackend>,
llcx: &llvm::Context,
llmod: &llvm::Module
) {
if !cgcx.msvc_imps_needed {
return
}
// The x86 ABI seems to require that leading underscores are added to symbol
// names, so we need an extra underscore on x86. There's also a leading
// '\x01' here which disables LLVM's symbol mangling (e.g., no extra
// underscores added in front).
let prefix = if cgcx.target_arch == "x86" | {
"\x01__imp__"
} | conditional_block |
|
write.rs |
pub fn to_llvm_opt_settings(cfg: config::OptLevel) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize)
{
use self::config::OptLevel::*;
match cfg {
No => (llvm::CodeGenOptLevel::None, llvm::CodeGenOptSizeNone),
Less => (llvm::CodeGenOptLevel::Less, llvm::CodeGenOptSizeNone),
Default => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeNone),
Aggressive => (llvm::CodeGenOptLevel::Aggressive, llvm::CodeGenOptSizeNone),
Size => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeDefault),
SizeMin => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeAggressive),
}
}
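// Minimal usage sketch (not in the original file): the tuple returned above feeds the
// target-machine and pass-manager setup. Assuming the llvm wrapper enums implement
// `PartialEq`/`Debug`, the mapping can be sanity-checked like this:
//
//     let (lvl, size) = to_llvm_opt_settings(config::OptLevel::SizeMin);
//     assert_eq!(lvl, llvm::CodeGenOptLevel::Default);
//     assert_eq!(size, llvm::CodeGenOptSizeAggressive);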
// If find_features is true, this won't access `sess.crate_types` and instead assumes
// that `is_pie_binary` is false. When we discover LLVM target features,
// `sess.crate_types` is uninitialized, so we cannot access it.
pub fn target_machine_factory(sess: &Session, optlvl: config::OptLevel, find_features: bool)
-> Arc<dyn Fn() -> Result<&'static mut llvm::TargetMachine, String> + Send + Sync>
{
let reloc_model = get_reloc_model(sess);
let (opt_level, _) = to_llvm_opt_settings(optlvl);
let use_softfp = sess.opts.cg.soft_float;
let ffunction_sections = sess.target.target.options.function_sections;
let fdata_sections = ffunction_sections;
let code_model_arg = sess.opts.cg.code_model.as_ref().or(
sess.target.target.options.code_model.as_ref(),
);
let code_model = match code_model_arg {
Some(s) => {
match CODE_GEN_MODEL_ARGS.iter().find(|arg| arg.0 == s) {
Some(x) => x.1,
_ => {
sess.err(&format!("{:?} is not a valid code model",
code_model_arg));
sess.abort_if_errors();
bug!();
}
}
}
None => llvm::CodeModel::None,
};
let features = attributes::llvm_target_features(sess).collect::<Vec<_>>();
let mut singlethread = sess.target.target.options.singlethread;
// On the wasm target once the `atomics` feature is enabled that means that
// we're no longer single-threaded, or otherwise we don't want LLVM to
// lower atomic operations to single-threaded operations.
if singlethread &&
sess.target.target.llvm_target.contains("wasm32") &&
features.iter().any(|s| *s == "+atomics")
{
singlethread = false;
}
let triple = SmallCStr::new(&sess.target.target.llvm_target);
let cpu = SmallCStr::new(llvm_util::target_cpu(sess));
let features = features.join(",");
let features = CString::new(features).unwrap();
let is_pie_binary = !find_features && is_pie_binary(sess);
let trap_unreachable = sess.target.target.options.trap_unreachable;
let emit_stack_size_section = sess.opts.debugging_opts.emit_stack_sizes;
let asm_comments = sess.asm_comments();
Arc::new(move || {
let tm = unsafe {
llvm::LLVMRustCreateTargetMachine(
triple.as_ptr(), cpu.as_ptr(), features.as_ptr(),
code_model,
reloc_model,
opt_level,
use_softfp,
is_pie_binary,
ffunction_sections,
fdata_sections,
trap_unreachable,
singlethread,
asm_comments,
emit_stack_size_section,
)
};
tm.ok_or_else(|| {
format!("Could not create LLVM TargetMachine for triple: {}",
triple.to_str().unwrap())
})
})
}
pub(crate) fn save_temp_bitcode(
cgcx: &CodegenContext<LlvmCodegenBackend>,
module: &ModuleCodegen<ModuleLlvm>,
name: &str
) {
if !cgcx.save_temps {
return
}
unsafe {
let ext = format!("{}.bc", name);
let cgu = Some(&module.name[..]);
let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
let cstr = path_to_c_string(&path);
let llmod = module.module_llvm.llmod();
llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
}
}
pub struct DiagnosticHandlers<'a> {
data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a Handler),
llcx: &'a llvm::Context,
}
impl<'a> DiagnosticHandlers<'a> {
pub fn new(cgcx: &'a CodegenContext<LlvmCodegenBackend>,
handler: &'a Handler,
llcx: &'a llvm::Context) -> Self {
let data = Box::into_raw(Box::new((cgcx, handler)));
unsafe {
llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data.cast());
llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, data.cast());
}
DiagnosticHandlers { data, llcx }
}
}
impl<'a> Drop for DiagnosticHandlers<'a> {
fn drop(&mut self) {
use std::ptr::null_mut;
unsafe {
llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut());
llvm::LLVMContextSetDiagnosticHandler(self.llcx, diagnostic_handler, null_mut());
drop(Box::from_raw(self.data));
}
}
}
unsafe extern "C" fn report_inline_asm(cgcx: &CodegenContext<LlvmCodegenBackend>,
msg: &str,
cookie: c_uint) {
cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_owned());
}
unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic,
user: *const c_void,
cookie: c_uint) {
if user.is_null() {
return
}
let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
let msg = llvm::build_string(|s| llvm::LLVMRustWriteSMDiagnosticToString(diag, s))
.expect("non-UTF8 SMDiagnostic");
report_inline_asm(cgcx, &msg, cookie);
}
unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
if user.is_null() {
return
}
let (cgcx, diag_handler) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
match llvm::diagnostic::Diagnostic::unpack(info) {
llvm::diagnostic::InlineAsm(inline) => {
report_inline_asm(cgcx,
&llvm::twine_to_string(inline.message),
inline.cookie);
}
llvm::diagnostic::Optimization(opt) => {
let enabled = match cgcx.remark {
Passes::All => true,
Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name),
};
if enabled {
diag_handler.note_without_error(&format!("optimization {} for {} at {}:{}:{}: {}",
opt.kind.describe(),
opt.pass_name,
opt.filename,
opt.line,
opt.column,
opt.message));
}
}
llvm::diagnostic::PGO(diagnostic_ref) |
llvm::diagnostic::Linker(diagnostic_ref) => {
let msg = llvm::build_string(|s| {
llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
}).expect("non-UTF8 diagnostic");
diag_handler.warn(&msg);
}
llvm::diagnostic::UnknownDiagnostic(..) => {},
}
}
// Unsafe due to LLVM calls.
pub(crate) unsafe fn optimize(cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
module: &ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig)
-> Result<(), FatalError>
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize");
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
let tm = &*module.module_llvm.tm;
let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
if config.emit_no_opt_bc {
let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
let out = path_to_c_string(&out);
llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
}
if config.opt_level.is_some() {
// Create the two optimizing pass managers. These mirror what clang
        // does, and are populated by LLVM's default PassManagerBuilder.
// Each manager has a different set of passes, but they also share
// some common passes.
let fpm = | {
target_machine_factory(&tcx.sess, tcx.backend_optimization_level(LOCAL_CRATE), find_features)()
.unwrap_or_else(|err| {
llvm_err(tcx.sess.diagnostic(), &err).raise()
})
} | identifier_body |
|
lib.rs | and thus
//! backup currently running tasks,
//! in case you want to shutdown/restart your application,
//! constructing a new scheduler is doable.
//! However, please make sure your internal clock is synced.
#![deny(rust_2018_idioms)]
use chrono::Duration as ChronoDuration;
use parking_lot::{Condvar, Mutex, RwLock};
use std::{cmp::Ordering, collections::BinaryHeap, sync::Arc, time::Duration as StdDuration};
use threadpool::ThreadPool;
pub use chrono::{DateTime, Duration, Utc};
/// Compare if an `enum`-variant matches another variant.
macro_rules! cmp_variant {
($expression:expr, $($variant:tt)+) => {
match $expression {
$($variant)+ => true,
_ => false
}
}
}
/// When a task is due, this will be passed to the task.
/// Currently, there is not much use to this. However, this might be extended
/// in the future.
pub struct Context {
time: DateTime<Utc>,
}
/// Every task will return this `enum`.
pub enum DateResult {
/// The task is considered finished and can be fully removed.
Done,
/// The task will be scheduled for a new date on passed `DateTime<Utc>`.
Repeat(DateTime<Utc>),
}
/// Every job gets a planned `Date` with the scheduler.
pub struct Date {
pub context: Context,
pub job: Box<dyn FnMut(&mut Context) -> DateResult + Send + Sync + 'static>,
}
impl Eq for Date {}
/// Invert comparisons to create a min-heap.
impl Ord for Date {
fn cmp(&self, other: &Date) -> Ordering {
match self.context.time.cmp(&other.context.time) {
Ordering::Less => Ordering::Greater,
Ordering::Greater => Ordering::Less,
Ordering::Equal => Ordering::Equal,
}
}
}
/// Invert comparisons to create a min-heap.
impl PartialOrd for Date {
fn partial_cmp(&self, other: &Date) -> Option<Ordering> {
Some(match self.context.time.cmp(&other.context.time) {
Ordering::Less => Ordering::Greater,
Ordering::Greater => Ordering::Less,
Ordering::Equal => Ordering::Equal,
})
}
}
impl PartialEq for Date {
fn eq(&self, other: &Date) -> bool {
self.context.time == other.context.time
}
}
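// Illustrative note (not part of the original crate): with the inverted `Ord` above,
// `BinaryHeap<Date>` acts as a min-heap on `context.time`, so the soonest-due task is
// always at the top. With a hypothetical helper `date_at(t)` that builds a `Date`
// carrying a no-op job, the behaviour looks like this:
//
//     let now = Utc::now();
//     let mut heap = std::collections::BinaryHeap::new();
//     heap.push(date_at(now + Duration::seconds(60)));
//     heap.push(date_at(now + Duration::seconds(5)));
//     assert_eq!(heap.peek().unwrap().context.time, now + Duration::seconds(5));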
/// The [`Scheduler`]'s worker thread switches through different states
/// while running, each state changes the behaviour.
///
/// [`Scheduler`]: struct.Scheduler.html
enum SchedulerState {
/// No dates being awaited, sleep until one gets added.
PauseEmpty,
/// Pause until next date is due.
PauseTime(StdDuration),
/// If the next date is already waiting to be executed,
/// the thread continues running without sleeping.
Run,
/// Exits the thread.
Exit,
}
impl SchedulerState {
fn is_running(&self) -> bool {
cmp_variant!(*self, SchedulerState::Run)
}
fn new_pause_time(duration: ChronoDuration) -> Self {
SchedulerState::PauseTime(
duration
.to_std()
.unwrap_or_else(|_| StdDuration::from_millis(0)),
)
}
}
/// This scheduler exists on two levels: The handle, granting you the
/// ability to add new tasks, and the executor, dating and executing these
/// tasks when the specified time is met.
///
/// **Info**: This scheduler may not be precise due to anomalies such as
/// preemption or platform differences.
pub struct Scheduler {
    /// The means of communication with the running scheduler.
condvar: Arc<(Mutex<SchedulerState>, Condvar)>,
/// Every job has its date listed inside this.
dates: Arc<RwLock<BinaryHeap<Date>>>,
}
impl Scheduler {
/// Add a task to be executed when `time` is reached.
pub fn add_task_datetime<T>(&mut self, time: DateTime<Utc>, to_execute: T)
where
T: FnMut(&mut Context) -> DateResult + Send + Sync + 'static,
{
let &(ref state_lock, ref notifier) = &*self.condvar;
let task = Date {
context: Context { time },
job: Box::new(to_execute),
};
let mut locked_heap = self.dates.write();
if locked_heap.is_empty() {
let mut scheduler_state = state_lock.lock();
let left = task.context.time.signed_duration_since(Utc::now());
if !scheduler_state.is_running() {
| lse {
let mut scheduler_state = state_lock.lock();
if let SchedulerState::PauseTime(_) = *scheduler_state {
let peeked = locked_heap.peek().expect("Expected heap to be filled.");
if task.context.time < peeked.context.time {
let left = task.context.time.signed_duration_since(Utc::now());
if !scheduler_state.is_running() {
*scheduler_state = SchedulerState::PauseTime(
left.to_std()
.unwrap_or_else(|_| StdDuration::from_millis(0)),
);
notifier.notify_one();
}
}
}
}
locked_heap.push(task);
}
pub fn add_task_duration<T>(&mut self, how_long: ChronoDuration, to_execute: T)
where
T: FnMut(&mut Context) -> DateResult + Send + Sync + 'static,
{
let time = Utc::now() + how_long;
self.add_task_datetime(time, to_execute);
}
}
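// Usage sketch (illustrative; assumes the constructor defined further below, e.g.
// `Scheduler::new(4)` for a four-thread pool — the exact constructor name outside this
// excerpt is an assumption):
//
//     let mut scheduler = Scheduler::new(4);
//     // One-shot task, five seconds from now.
//     scheduler.add_task_duration(Duration::seconds(5), |_ctx| {
//         println!("five seconds later");
//         DateResult::Done
//     });
//     // A repeating task reschedules itself by returning `DateResult::Repeat(..)`.
//     scheduler.add_task_duration(Duration::minutes(1), |_ctx| {
//         DateResult::Repeat(Utc::now() + Duration::minutes(1))
//     });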
fn set_state_lock(state_lock: &Mutex<SchedulerState>, to_set: SchedulerState) {
let mut state = state_lock.lock();
*state = to_set;
}
#[inline]
fn _push_and_notfiy(date: Date, heap: &mut BinaryHeap<Date>, notifier: &Condvar) {
heap.push(date);
notifier.notify_one();
}
/// This function pushes a `date` onto `data_pooled` and notifies the
/// dispatching thread in case it is sleeping.
#[inline]
fn push_and_notfiy(
dispatcher_pair: &Arc<(Mutex<SchedulerState>, Condvar)>,
data_pooled: &Arc<RwLock<BinaryHeap<Date>>>,
when: &DateTime<Utc>,
date: Date,
) {
let &(ref state_lock, ref notifier) = &**dispatcher_pair;
let mut state = state_lock.lock();
let mut heap_lock = data_pooled.write();
if let Some(peek) = heap_lock.peek() {
if peek.context.time < *when {
let left = peek.context.time.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
} else {
let left = when.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
}
} else {
let left = when.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
}
}
#[must_use]
enum Break {
Yes,
No,
}
#[inline]
fn process_states(state_lock: &Mutex<SchedulerState>, notifier: &Condvar) -> Break {
let mut scheduler_state = state_lock.lock();
while let SchedulerState::PauseEmpty = *scheduler_state {
notifier.wait(&mut scheduler_state);
}
while let SchedulerState::PauseTime(duration) = *scheduler_state {
if notifier
.wait_for(&mut scheduler_state, duration)
.timed_out()
{
break;
}
}
if let SchedulerState::Exit = *scheduler_state {
return Break::Yes;
}
Break::No
}
fn dispatch_date(
threadpool: &ThreadPool,
dates: &Arc<RwLock<BinaryHeap<Date>>>,
pair_scheduler: &Arc<(Mutex<SchedulerState>, Condvar)>,
) {
let mut date = {
let mut dates = dates.write();
dates.pop().expect("Should not run on empty heap.")
};
let date_dispatcher = dates.clone();
let dispatcher_pair = pair_scheduler.clone();
threadpool.execute(move || {
if let DateResult::Repeat(when) = (date.job)(&mut date.context) {
date.context.time = when;
push_and_notfiy(&dispatcher_pair, &date_dispatcher, &when, date);
}
});
}
fn check_peeking_date(dates: &Arc<RwLock<BinaryHeap<Date>>>, state_lock: &Mutex<SchedulerState>) {
if let Some(next) = dates.read().peek() {
let now = Utc::now();
if next.context.time > now {
let left = next.context.time.signed_duration_since(now);
set_state_lock(&state_lock, SchedulerState::new_pause_time(left));
} else {
set_state_lock(&state_lock, SchedulerState::Run);
}
} else {
set_state_lock(&state_lock, SchedulerState::PauseEmpty);
}
}
impl Scheduler {
/// Creates a new [`Scheduler`] which will use `thread_count` number of
/// threads when tasks are being dispatched/dated.
///
/// [`Scheduler`]: struct.Scheduler.html
pub fn | *scheduler_state = SchedulerState::new_pause_time(left);
notifier.notify_one();
}
} e | conditional_block |
lib.rs | and thus
//! backup currently running tasks,
//! in case you want to shutdown/restart your application,
//! constructing a new scheduler is doable.
//! However, please make sure your internal clock is synced.
#![deny(rust_2018_idioms)]
use chrono::Duration as ChronoDuration;
use parking_lot::{Condvar, Mutex, RwLock};
use std::{cmp::Ordering, collections::BinaryHeap, sync::Arc, time::Duration as StdDuration};
use threadpool::ThreadPool;
pub use chrono::{DateTime, Duration, Utc};
/// Compare if an `enum`-variant matches another variant.
macro_rules! cmp_variant {
($expression:expr, $($variant:tt)+) => {
match $expression {
$($variant)+ => true,
_ => false
}
}
}
/// When a task is due, this will be passed to the task.
/// Currently, there is not much use to this. However, this might be extended
/// in the future.
pub struct Context {
time: DateTime<Utc>,
}
/// Every task will return this `enum`.
pub enum DateResult {
/// The task is considered finished and can be fully removed.
Done,
/// The task will be scheduled for a new date on passed `DateTime<Utc>`.
Repeat(DateTime<Utc>),
}
/// Every job gets a planned `Date` with the scheduler.
pub struct Date {
pub context: Context,
pub job: Box<dyn FnMut(&mut Context) -> DateResult + Send + Sync + 'static>,
}
impl Eq for Date {}
/// Invert comparisons to create a min-heap.
impl Ord for Date {
fn cmp(&self, other: &Date) -> Ordering {
match self.context.time.cmp(&other.context.time) {
Ordering::Less => Ordering::Greater,
Ordering::Greater => Ordering::Less,
Ordering::Equal => Ordering::Equal,
}
}
}
/// Invert comparisons to create a min-heap.
impl PartialOrd for Date {
fn partial_cmp(&self, other: &Date) -> Option<Ordering> {
Some(match self.context.time.cmp(&other.context.time) {
Ordering::Less => Ordering::Greater,
Ordering::Greater => Ordering::Less,
Ordering::Equal => Ordering::Equal,
})
}
}
impl PartialEq for Date {
fn eq(&self, other: &Date) -> bool {
self.context.time == other.context.time
}
}
/// The [`Scheduler`]'s worker thread switches through different states
/// while running, each state changes the behaviour.
///
/// [`Scheduler`]: struct.Scheduler.html
enum SchedulerState {
/// No dates being awaited, sleep until one gets added.
PauseEmpty,
/// Pause until next date is due.
PauseTime(StdDuration),
/// If the next date is already waiting to be executed,
/// the thread continues running without sleeping.
Run,
/// Exits the thread.
Exit,
}
impl SchedulerState {
fn is_running(&self) -> bool {
cmp_variant!(*self, SchedulerState::Run)
}
fn new_pause_time(duration: ChronoDuration) -> Self {
Sc | scheduler exists on two levels: The handle, granting you the
/// ability to add new tasks, and the executor, dating and executing these
/// tasks when the specified time is met.
///
/// **Info**: This scheduler may not be precise due to anomalies such as
/// preemption or platform differences.
pub struct Scheduler {
    /// The means of communication with the running scheduler.
condvar: Arc<(Mutex<SchedulerState>, Condvar)>,
/// Every job has its date listed inside this.
dates: Arc<RwLock<BinaryHeap<Date>>>,
}
impl Scheduler {
/// Add a task to be executed when `time` is reached.
pub fn add_task_datetime<T>(&mut self, time: DateTime<Utc>, to_execute: T)
where
T: FnMut(&mut Context) -> DateResult + Send + Sync + 'static,
{
let &(ref state_lock, ref notifier) = &*self.condvar;
let task = Date {
context: Context { time },
job: Box::new(to_execute),
};
let mut locked_heap = self.dates.write();
if locked_heap.is_empty() {
let mut scheduler_state = state_lock.lock();
let left = task.context.time.signed_duration_since(Utc::now());
if !scheduler_state.is_running() {
*scheduler_state = SchedulerState::new_pause_time(left);
notifier.notify_one();
}
} else {
let mut scheduler_state = state_lock.lock();
if let SchedulerState::PauseTime(_) = *scheduler_state {
let peeked = locked_heap.peek().expect("Expected heap to be filled.");
if task.context.time < peeked.context.time {
let left = task.context.time.signed_duration_since(Utc::now());
if !scheduler_state.is_running() {
*scheduler_state = SchedulerState::PauseTime(
left.to_std()
.unwrap_or_else(|_| StdDuration::from_millis(0)),
);
notifier.notify_one();
}
}
}
}
locked_heap.push(task);
}
pub fn add_task_duration<T>(&mut self, how_long: ChronoDuration, to_execute: T)
where
T: FnMut(&mut Context) -> DateResult + Send + Sync + 'static,
{
let time = Utc::now() + how_long;
self.add_task_datetime(time, to_execute);
}
}
fn set_state_lock(state_lock: &Mutex<SchedulerState>, to_set: SchedulerState) {
let mut state = state_lock.lock();
*state = to_set;
}
#[inline]
fn _push_and_notfiy(date: Date, heap: &mut BinaryHeap<Date>, notifier: &Condvar) {
heap.push(date);
notifier.notify_one();
}
/// This function pushes a `date` onto `data_pooled` and notifies the
/// dispatching thread in case it is sleeping.
#[inline]
fn push_and_notfiy(
dispatcher_pair: &Arc<(Mutex<SchedulerState>, Condvar)>,
data_pooled: &Arc<RwLock<BinaryHeap<Date>>>,
when: &DateTime<Utc>,
date: Date,
) {
let &(ref state_lock, ref notifier) = &**dispatcher_pair;
let mut state = state_lock.lock();
let mut heap_lock = data_pooled.write();
if let Some(peek) = heap_lock.peek() {
if peek.context.time < *when {
let left = peek.context.time.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
} else {
let left = when.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
}
} else {
let left = when.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
}
}
#[must_use]
enum Break {
Yes,
No,
}
#[inline]
fn process_states(state_lock: &Mutex<SchedulerState>, notifier: &Condvar) -> Break {
let mut scheduler_state = state_lock.lock();
while let SchedulerState::PauseEmpty = *scheduler_state {
notifier.wait(&mut scheduler_state);
}
while let SchedulerState::PauseTime(duration) = *scheduler_state {
if notifier
.wait_for(&mut scheduler_state, duration)
.timed_out()
{
break;
}
}
if let SchedulerState::Exit = *scheduler_state {
return Break::Yes;
}
Break::No
}
fn dispatch_date(
threadpool: &ThreadPool,
dates: &Arc<RwLock<BinaryHeap<Date>>>,
pair_scheduler: &Arc<(Mutex<SchedulerState>, Condvar)>,
) {
let mut date = {
let mut dates = dates.write();
dates.pop().expect("Should not run on empty heap.")
};
let date_dispatcher = dates.clone();
let dispatcher_pair = pair_scheduler.clone();
threadpool.execute(move || {
if let DateResult::Repeat(when) = (date.job)(&mut date.context) {
date.context.time = when;
push_and_notfiy(&dispatcher_pair, &date_dispatcher, &when, date);
}
});
}
fn check_peeking_date(dates: &Arc<RwLock<BinaryHeap<Date>>>, state_lock: &Mutex<SchedulerState>) {
if let Some(next) = dates.read().peek() {
let now = Utc::now();
if next.context.time > now {
let left = next.context.time.signed_duration_since(now);
set_state_lock(&state_lock, SchedulerState::new_pause_time(left));
} else {
set_state_lock(&state_lock, SchedulerState::Run);
}
} else {
set_state_lock(&state_lock, SchedulerState::PauseEmpty);
}
}
impl Scheduler {
/// Creates a new [`Scheduler`] which will use `thread_count` number of
/// threads when tasks are being dispatched/dated.
///
/// [`Scheduler`]: struct.Scheduler.html
pub fn | hedulerState::PauseTime(
duration
.to_std()
.unwrap_or_else(|_| StdDuration::from_millis(0)),
)
}
}
/// This | identifier_body |
lib.rs | ise and thus
//! backup currently running tasks,
//! in case you want to shutdown/restart your application,
//! constructing a new scheduler is doable.
//! However, please make sure your internal clock is synced.
#![deny(rust_2018_idioms)]
use chrono::Duration as ChronoDuration;
use parking_lot::{Condvar, Mutex, RwLock};
use std::{cmp::Ordering, collections::BinaryHeap, sync::Arc, time::Duration as StdDuration};
use threadpool::ThreadPool;
pub use chrono::{DateTime, Duration, Utc};
/// Compare if an `enum`-variant matches another variant.
macro_rules! cmp_variant {
($expression:expr, $($variant:tt)+) => {
match $expression {
$($variant)+ => true,
_ => false
}
}
}
/// When a task is due, this will be passed to the task.
/// Currently, there is not much use to this. However, this might be extended
/// in the future.
pub struct Context {
| DateTime<Utc>,
}
/// Every task will return this `enum`.
pub enum DateResult {
/// The task is considered finished and can be fully removed.
Done,
/// The task will be scheduled for a new date on passed `DateTime<Utc>`.
Repeat(DateTime<Utc>),
}
/// Every job gets a planned `Date` with the scheduler.
pub struct Date {
pub context: Context,
pub job: Box<dyn FnMut(&mut Context) -> DateResult + Send + Sync + 'static>,
}
impl Eq for Date {}
/// Invert comparisons to create a min-heap.
impl Ord for Date {
fn cmp(&self, other: &Date) -> Ordering {
match self.context.time.cmp(&other.context.time) {
Ordering::Less => Ordering::Greater,
Ordering::Greater => Ordering::Less,
Ordering::Equal => Ordering::Equal,
}
}
}
/// Invert comparisons to create a min-heap.
impl PartialOrd for Date {
fn partial_cmp(&self, other: &Date) -> Option<Ordering> {
Some(match self.context.time.cmp(&other.context.time) {
Ordering::Less => Ordering::Greater,
Ordering::Greater => Ordering::Less,
Ordering::Equal => Ordering::Equal,
})
}
}
impl PartialEq for Date {
fn eq(&self, other: &Date) -> bool {
self.context.time == other.context.time
}
}
/// The [`Scheduler`]'s worker thread switches through different states
/// while running, each state changes the behaviour.
///
/// [`Scheduler`]: struct.Scheduler.html
enum SchedulerState {
/// No dates being awaited, sleep until one gets added.
PauseEmpty,
/// Pause until next date is due.
PauseTime(StdDuration),
/// If the next date is already waiting to be executed,
/// the thread continues running without sleeping.
Run,
/// Exits the thread.
Exit,
}
impl SchedulerState {
fn is_running(&self) -> bool {
cmp_variant!(*self, SchedulerState::Run)
}
fn new_pause_time(duration: ChronoDuration) -> Self {
SchedulerState::PauseTime(
duration
.to_std()
.unwrap_or_else(|_| StdDuration::from_millis(0)),
)
}
}
/// This scheduler exists on two levels: The handle, granting you the
/// ability to add new tasks, and the executor, dating and executing these
/// tasks when the specified time is met.
///
/// **Info**: This scheduler may not be precise due to anomalies such as
/// preemption or platform differences.
pub struct Scheduler {
    /// The means of communication with the running scheduler.
condvar: Arc<(Mutex<SchedulerState>, Condvar)>,
/// Every job has its date listed inside this.
dates: Arc<RwLock<BinaryHeap<Date>>>,
}
impl Scheduler {
/// Add a task to be executed when `time` is reached.
pub fn add_task_datetime<T>(&mut self, time: DateTime<Utc>, to_execute: T)
where
T: FnMut(&mut Context) -> DateResult + Send + Sync + 'static,
{
let &(ref state_lock, ref notifier) = &*self.condvar;
let task = Date {
context: Context { time },
job: Box::new(to_execute),
};
let mut locked_heap = self.dates.write();
if locked_heap.is_empty() {
let mut scheduler_state = state_lock.lock();
let left = task.context.time.signed_duration_since(Utc::now());
if !scheduler_state.is_running() {
*scheduler_state = SchedulerState::new_pause_time(left);
notifier.notify_one();
}
} else {
let mut scheduler_state = state_lock.lock();
if let SchedulerState::PauseTime(_) = *scheduler_state {
let peeked = locked_heap.peek().expect("Expected heap to be filled.");
if task.context.time < peeked.context.time {
let left = task.context.time.signed_duration_since(Utc::now());
if !scheduler_state.is_running() {
*scheduler_state = SchedulerState::PauseTime(
left.to_std()
.unwrap_or_else(|_| StdDuration::from_millis(0)),
);
notifier.notify_one();
}
}
}
}
locked_heap.push(task);
}
pub fn add_task_duration<T>(&mut self, how_long: ChronoDuration, to_execute: T)
where
T: FnMut(&mut Context) -> DateResult + Send + Sync + 'static,
{
let time = Utc::now() + how_long;
self.add_task_datetime(time, to_execute);
}
}
fn set_state_lock(state_lock: &Mutex<SchedulerState>, to_set: SchedulerState) {
let mut state = state_lock.lock();
*state = to_set;
}
#[inline]
fn _push_and_notfiy(date: Date, heap: &mut BinaryHeap<Date>, notifier: &Condvar) {
heap.push(date);
notifier.notify_one();
}
/// This function pushes a `date` onto `data_pooled` and notifies the
/// dispatching thread in case it is sleeping.
#[inline]
fn push_and_notfiy(
dispatcher_pair: &Arc<(Mutex<SchedulerState>, Condvar)>,
data_pooled: &Arc<RwLock<BinaryHeap<Date>>>,
when: &DateTime<Utc>,
date: Date,
) {
let &(ref state_lock, ref notifier) = &**dispatcher_pair;
let mut state = state_lock.lock();
let mut heap_lock = data_pooled.write();
if let Some(peek) = heap_lock.peek() {
if peek.context.time < *when {
let left = peek.context.time.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
} else {
let left = when.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
}
} else {
let left = when.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
}
}
#[must_use]
enum Break {
Yes,
No,
}
#[inline]
fn process_states(state_lock: &Mutex<SchedulerState>, notifier: &Condvar) -> Break {
let mut scheduler_state = state_lock.lock();
while let SchedulerState::PauseEmpty = *scheduler_state {
notifier.wait(&mut scheduler_state);
}
while let SchedulerState::PauseTime(duration) = *scheduler_state {
if notifier
.wait_for(&mut scheduler_state, duration)
.timed_out()
{
break;
}
}
if let SchedulerState::Exit = *scheduler_state {
return Break::Yes;
}
Break::No
}
fn dispatch_date(
threadpool: &ThreadPool,
dates: &Arc<RwLock<BinaryHeap<Date>>>,
pair_scheduler: &Arc<(Mutex<SchedulerState>, Condvar)>,
) {
let mut date = {
let mut dates = dates.write();
dates.pop().expect("Should not run on empty heap.")
};
let date_dispatcher = dates.clone();
let dispatcher_pair = pair_scheduler.clone();
threadpool.execute(move || {
if let DateResult::Repeat(when) = (date.job)(&mut date.context) {
date.context.time = when;
push_and_notfiy(&dispatcher_pair, &date_dispatcher, &when, date);
}
});
}
fn check_peeking_date(dates: &Arc<RwLock<BinaryHeap<Date>>>, state_lock: &Mutex<SchedulerState>) {
if let Some(next) = dates.read().peek() {
let now = Utc::now();
if next.context.time > now {
let left = next.context.time.signed_duration_since(now);
set_state_lock(&state_lock, SchedulerState::new_pause_time(left));
} else {
set_state_lock(&state_lock, SchedulerState::Run);
}
} else {
set_state_lock(&state_lock, SchedulerState::PauseEmpty);
}
}
impl Scheduler {
/// Creates a new [`Scheduler`] which will use `thread_count` number of
/// threads when tasks are being dispatched/dated.
///
/// [`Scheduler`]: struct.Scheduler.html
pub fn | time: | identifier_name |
lib.rs | ise and thus
//! backup currently running tasks,
//! in case you want to shutdown/restart your application,
//! constructing a new scheduler is doable.
//! However, please make sure your internal clock is synced.
#![deny(rust_2018_idioms)]
use chrono::Duration as ChronoDuration;
use parking_lot::{Condvar, Mutex, RwLock};
use std::{cmp::Ordering, collections::BinaryHeap, sync::Arc, time::Duration as StdDuration};
use threadpool::ThreadPool;
pub use chrono::{DateTime, Duration, Utc};
/// Compare if an `enum`-variant matches another variant.
macro_rules! cmp_variant {
($expression:expr, $($variant:tt)+) => {
match $expression {
$($variant)+ => true,
_ => false
}
}
}
/// When a task is due, this will be passed to the task.
/// Currently, there is not much use to this. However, this might be extended
/// in the future.
pub struct Context {
time: DateTime<Utc>,
}
/// Every task will return this `enum`.
pub enum DateResult {
/// The task is considered finished and can be fully removed.
Done,
/// The task will be scheduled for a new date on passed `DateTime<Utc>`.
Repeat(DateTime<Utc>),
}
/// Every job gets a planned `Date` with the scheduler.
pub struct Date {
pub context: Context,
pub job: Box<dyn FnMut(&mut Context) -> DateResult + Send + Sync + 'static>,
}
impl Eq for Date {}
/// Invert comparisons to create a min-heap.
impl Ord for Date {
fn cmp(&self, other: &Date) -> Ordering {
match self.context.time.cmp(&other.context.time) {
Ordering::Less => Ordering::Greater,
Ordering::Greater => Ordering::Less,
Ordering::Equal => Ordering::Equal,
}
}
}
/// Invert comparisons to create a min-heap.
impl PartialOrd for Date {
fn partial_cmp(&self, other: &Date) -> Option<Ordering> {
Some(match self.context.time.cmp(&other.context.time) {
Ordering::Less => Ordering::Greater,
Ordering::Greater => Ordering::Less,
Ordering::Equal => Ordering::Equal,
})
}
}
impl PartialEq for Date {
fn eq(&self, other: &Date) -> bool {
self.context.time == other.context.time
}
}
/// The [`Scheduler`]'s worker thread switches through different states
/// while running, each state changes the behaviour.
///
/// [`Scheduler`]: struct.Scheduler.html
enum SchedulerState {
/// No dates being awaited, sleep until one gets added.
PauseEmpty,
/// Pause until next date is due.
PauseTime(StdDuration),
/// If the next date is already waiting to be executed,
/// the thread continues running without sleeping.
Run,
/// Exits the thread.
Exit,
}
impl SchedulerState {
fn is_running(&self) -> bool {
cmp_variant!(*self, SchedulerState::Run)
}
fn new_pause_time(duration: ChronoDuration) -> Self {
SchedulerState::PauseTime(
duration
.to_std()
.unwrap_or_else(|_| StdDuration::from_millis(0)),
)
}
}
/// This scheduler exists on two levels: The handle, granting you the
/// ability to add new tasks, and the executor, dating and executing these
/// tasks when the specified time is met.
///
/// **Info**: This scheduler may not be precise due to anomalies such as
/// preemption or platform differences.
pub struct Scheduler {
    /// The means of communication with the running scheduler.
condvar: Arc<(Mutex<SchedulerState>, Condvar)>,
/// Every job has its date listed inside this.
dates: Arc<RwLock<BinaryHeap<Date>>>,
}
impl Scheduler {
/// Add a task to be executed when `time` is reached.
pub fn add_task_datetime<T>(&mut self, time: DateTime<Utc>, to_execute: T)
where
T: FnMut(&mut Context) -> DateResult + Send + Sync + 'static,
{
let &(ref state_lock, ref notifier) = &*self.condvar;
let task = Date {
context: Context { time },
job: Box::new(to_execute),
};
let mut locked_heap = self.dates.write();
if locked_heap.is_empty() {
let mut scheduler_state = state_lock.lock();
let left = task.context.time.signed_duration_since(Utc::now());
if !scheduler_state.is_running() {
*scheduler_state = SchedulerState::new_pause_time(left);
notifier.notify_one();
}
} else {
let mut scheduler_state = state_lock.lock();
if let SchedulerState::PauseTime(_) = *scheduler_state {
let peeked = locked_heap.peek().expect("Expected heap to be filled.");
if task.context.time < peeked.context.time {
let left = task.context.time.signed_duration_since(Utc::now());
if !scheduler_state.is_running() {
*scheduler_state = SchedulerState::PauseTime(
left.to_std()
.unwrap_or_else(|_| StdDuration::from_millis(0)),
);
notifier.notify_one();
}
}
}
}
locked_heap.push(task);
}
pub fn add_task_duration<T>(&mut self, how_long: ChronoDuration, to_execute: T)
where
T: FnMut(&mut Context) -> DateResult + Send + Sync + 'static,
{
let time = Utc::now() + how_long;
self.add_task_datetime(time, to_execute);
}
}
fn set_state_lock(state_lock: &Mutex<SchedulerState>, to_set: SchedulerState) {
let mut state = state_lock.lock();
*state = to_set;
}
#[inline]
fn _push_and_notfiy(date: Date, heap: &mut BinaryHeap<Date>, notifier: &Condvar) {
heap.push(date);
notifier.notify_one();
}
/// This function pushes a `date` onto `data_pooled` and notifies the
/// dispatching thread in case it is sleeping.
#[inline]
fn push_and_notfiy(
dispatcher_pair: &Arc<(Mutex<SchedulerState>, Condvar)>,
data_pooled: &Arc<RwLock<BinaryHeap<Date>>>,
when: &DateTime<Utc>,
date: Date,
) {
let &(ref state_lock, ref notifier) = &**dispatcher_pair;
let mut state = state_lock.lock();
let mut heap_lock = data_pooled.write();
if let Some(peek) = heap_lock.peek() {
if peek.context.time < *when {
let left = peek.context.time.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
} else {
let left = when.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
}
} else {
let left = when.signed_duration_since(Utc::now());
|
}
#[must_use]
enum Break {
Yes,
No,
}
#[inline]
fn process_states(state_lock: &Mutex<SchedulerState>, notifier: &Condvar) -> Break {
let mut scheduler_state = state_lock.lock();
while let SchedulerState::PauseEmpty = *scheduler_state {
notifier.wait(&mut scheduler_state);
}
while let SchedulerState::PauseTime(duration) = *scheduler_state {
if notifier
.wait_for(&mut scheduler_state, duration)
.timed_out()
{
break;
}
}
if let SchedulerState::Exit = *scheduler_state {
return Break::Yes;
}
Break::No
}
fn dispatch_date(
threadpool: &ThreadPool,
dates: &Arc<RwLock<BinaryHeap<Date>>>,
pair_scheduler: &Arc<(Mutex<SchedulerState>, Condvar)>,
) {
let mut date = {
let mut dates = dates.write();
dates.pop().expect("Should not run on empty heap.")
};
let date_dispatcher = dates.clone();
let dispatcher_pair = pair_scheduler.clone();
threadpool.execute(move || {
if let DateResult::Repeat(when) = (date.job)(&mut date.context) {
date.context.time = when;
push_and_notfiy(&dispatcher_pair, &date_dispatcher, &when, date);
}
});
}
fn check_peeking_date(dates: &Arc<RwLock<BinaryHeap<Date>>>, state_lock: &Mutex<SchedulerState>) {
if let Some(next) = dates.read().peek() {
let now = Utc::now();
if next.context.time > now {
let left = next.context.time.signed_duration_since(now);
set_state_lock(&state_lock, SchedulerState::new_pause_time(left));
} else {
set_state_lock(&state_lock, SchedulerState::Run);
}
} else {
set_state_lock(&state_lock, SchedulerState::PauseEmpty);
}
}
impl Scheduler {
/// Creates a new [`Scheduler`] which will use `thread_count` number of
/// threads when tasks are being dispatched/dated.
///
/// [`Scheduler`]: struct.Scheduler.html
pub fn | *state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
} | random_line_split |
project.py | _window].ravel()
hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
else:
hog_features = hog_feat1
xleft = xpos*pix_per_cell
ytop = ypos*pix_per_cell
test_prediction = svc.predict(hog_features)
if test_prediction == 1 or show_all_rectangles:
xbox_left = np.int(xleft*scale)
ytop_draw = np.int(ytop*scale)
win_draw = np.int(window*scale)
rectangles.append(((xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart)))
return rectangles
def drawOnImage(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
random_color = False
# Iterate through the bounding boxes
for bbox in bboxes:
if color == 'random' or random_color:
color = (np.random.randint(0,255), np.random.randint(0,255), np.random.randint(0,255))
random_color = True
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
def Test():
test_images = glob.glob('./test_images/test*.jpg')
fig, axs = plt.subplots(3, 2, figsize=(16,14))
fig.subplots_adjust(hspace = .004, wspace=.002)
axs = axs.ravel()
for i, im in enumerate(test_images):
axs[i].imshow(Pipeline(mpimg.imread(im)))
axs[i].axis('off')
plt.show()
'''
car_images, noncar_images = LoadTrainData(False)
car_img = mpimg.imread(car_images[5])
_, car_dst = getHOG(car_img[:,:,2], 9, 8, 8, vis=True, feature_vec=True)
noncar_img = mpimg.imread(noncar_images[5])
_, noncar_dst = getHOG(noncar_img[:,:,2], 9, 8, 8, vis=True, feature_vec=True)
# Visualize
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(7,7))
f.subplots_adjust(hspace = .4, wspace=.2)
ax1.imshow(car_img)
ax1.set_title('Car Image', fontsize=16)
ax2.imshow(car_dst, cmap='gray')
ax2.set_title('Car HOG', fontsize=16)
ax3.imshow(noncar_img)
ax3.set_title('Non-Car Image', fontsize=16)
ax4.imshow(noncar_dst, cmap='gray')
ax4.set_title('Non-Car HOG', fontsize=16)
plt.show()
'''
return
def CombineWindowSearches(test_img):
#test_img = mpimg.imread('./test_images/test1.jpg')
rectangles = []
colorspace = 'YUV' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 11
pix_per_cell = 16
cell_per_block = 2
hog_channel = 'ALL' # Can be 0, 1, 2, or "ALL"
ystart = 400
ystop = 464
scale = 1.0
rectangles.append(SlidingWindow(test_img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
orient, pix_per_cell, cell_per_block, None, None))
ystart = 416
ystop = 480
scale = 1.0
rectangles.append(SlidingWindow(test_img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
orient, pix_per_cell, cell_per_block, None, None))
ystart = 400
ystop = 496
scale = 1.5
rectangles.append(SlidingWindow(test_img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
orient, pix_per_cell, cell_per_block, None, None))
ystart = 432
ystop = 528
scale = 1.5
rectangles.append(SlidingWindow(test_img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
orient, pix_per_cell, cell_per_block, None, None))
ystart = 400
ystop = 528
scale = 2.0
rectangles.append(SlidingWindow(test_img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
orient, pix_per_cell, cell_per_block, None, None))
ystart = 432
ystop = 560
scale = 2.0
rectangles.append(SlidingWindow(test_img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
orient, pix_per_cell, cell_per_block, None, None))
ystart = 400
ystop = 596
scale = 3.5
rectangles.append(SlidingWindow(test_img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
orient, pix_per_cell, cell_per_block, None, None))
ystart = 464
ystop = 660
scale = 3.5
rectangles.append(SlidingWindow(test_img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
orient, pix_per_cell, cell_per_block, None, None))
# apparently this is the best way to flatten a list of lists
rectangles = [item for sublist in rectangles for item in sublist]
'''
test_img_rects = drawOnImage(test_img, rectangles, color='random', thick=2)
plt.figure(figsize=(10,10))
plt.imshow(test_img_rects)
plt.show()
'''
return rectangles
def HeatMap(heatmap, bbox_list):
# Iterate through list of bboxes
for box in bbox_list:
# Add += 1 for all pixels inside each bbox
# Assuming each "box" takes the form ((x1, y1), (x2, y2))
heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
# Return updated heatmap
return heatmap
def ThresholdImage(heatmap, threshold):
# Zero out pixels below the threshold
heatmap[heatmap <= threshold] = 0
# Return thresholded map
return heatmap
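# Worked example (illustrative only): if three scaled windows overlap the same car, the
# pixels in the overlap accumulate a heat value of 3, while a lone false-positive window
# only reaches 1 and is zeroed out by a threshold of 1:
#
#   heat = np.zeros((720, 1280))
#   heat = HeatMap(heat, [((800, 400), (928, 528)),    # three overlapping detections
#                         ((820, 410), (948, 538)),
#                         ((810, 405), (938, 533)),
#                         ((100, 400), (164, 464))])   # isolated false positive
#   heat = ThresholdImage(heat, 1)                     # isolated hit drops to zero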
def LabelImage(heatmap_img):
ThresholdImage(heatmap_img, 1)
labels = label(heatmap_img)
return labels
def DrawFinal(img, labels):
# Iterate through all detected cars
rects = []
for car_number in range(1, labels[1]+1):
# Find pixels with each car_number label value
nonzero = (labels[0] == car_number).nonzero()
# Identify x and y values of those pixels
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Define a bounding box based on min/max x and y
bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
rects.append(bbox)
# Draw the box on the image
cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)
# Return the image and final rectangles
return img, rects
def Pipeline(img):
rectangles = CombineWindowSearches(img)
if len(rectangles) > 0:
det.add_rects(rectangles)
heatmap_img = np.zeros_like(img[:,:,0])
for rect_set in det.prev_rects:
heatmap_img = HeatMap(heatmap_img, rect_set)
heatmap_img = ThresholdImage(heatmap_img, 1 + len(det.prev_rects)//2)
labels = LabelImage(heatmap_img)
draw_img, rect = DrawFinal(np.copy(img), labels)
return draw_img
def ProcessVideo():
video_output1 = 'project_video_output.mp4'
video_input1 = VideoFileClip('project_video.mp4')#.subclip(22,26)
processed_video = video_input1.fl_image(Pipeline)
processed_video.write_videofile(video_output1, audio=False)
return
class Vehicle_Detect():
def __init__(self):
# history of rectangles previous n frames
self.prev_rects = []
def add_rects(self, rects):
| self.prev_rects.append(rects)
if len(self.prev_rects) > 15:
# throw out oldest rectangle set(s)
self.prev_rects = self.prev_rects[len(self.prev_rects)-15:] | identifier_body |
|
project.py | image if other than 1.0 scale
if scale != 1:
imshape = ctrans_tosearch.shape
ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1]/scale), np.int(imshape[0]/scale)))
# select colorspace channel for HOG
if hog_channel == 'ALL':
ch1 = ctrans_tosearch[:,:,0]
ch2 = ctrans_tosearch[:,:,1]
ch3 = ctrans_tosearch[:,:,2]
else:
ch1 = ctrans_tosearch[:,:,hog_channel]
# Define blocks and steps as above
nxblocks = (ch1.shape[1] // pix_per_cell)+1 #-1
nyblocks = (ch1.shape[0] // pix_per_cell)+1 #-1
nfeat_per_block = orient*cell_per_block**2
# 64 was the original sampling rate, with 8 cells and 8 pix per cell
window = 64
nblocks_per_window = (window // pix_per_cell)-1
cells_per_step = 2 # Instead of overlap, define how many cells to step
nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
nysteps = (nyblocks - nblocks_per_window) // cells_per_step
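# Worked example (illustrative): for a 1280-pixel-wide strip at scale 1.0 with
# pix_per_cell = 16, nxblocks = 1280 // 16 + 1 = 81 and
# nblocks_per_window = 64 // 16 - 1 = 3, so nxsteps = (81 - 3) // 2 = 39 window
# positions per row; each step advances 2 cells = 32 pixels.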
# Compute individual channel HOG features for the entire image
hog1 = getHOG(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
if hog_channel == 'ALL':
hog2 = getHOG(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog3 = getHOG(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
for xb in range(nxsteps):
for yb in range(nysteps):
ypos = yb*cells_per_step
xpos = xb*cells_per_step
# Extract HOG for this patch
hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
if hog_channel == 'ALL':
hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
else:
hog_features = hog_feat1
xleft = xpos*pix_per_cell
ytop = ypos*pix_per_cell
test_prediction = svc.predict(hog_features)
if test_prediction == 1 or show_all_rectangles:
xbox_left = np.int(xleft*scale)
ytop_draw = np.int(ytop*scale)
win_draw = np.int(window*scale)
rectangles.append(((xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart)))
return rectangles
def drawOnImage(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
random_color = False
# Iterate through the bounding boxes
for bbox in bboxes:
if color == 'random' or random_color:
color = (np.random.randint(0,255), np.random.randint(0,255), np.random.randint(0,255))
random_color = True
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
def Test():
test_images = glob.glob('./test_images/test*.jpg')
fig, axs = plt.subplots(3, 2, figsize=(16,14))
fig.subplots_adjust(hspace = .004, wspace=.002)
axs = axs.ravel()
for i, im in enumerate(test_images):
axs[i].imshow(Pipeline(mpimg.imread(im)))
axs[i].axis('off')
plt.show()
'''
car_images, noncar_images = LoadTrainData(False)
car_img = mpimg.imread(car_images[5])
_, car_dst = getHOG(car_img[:,:,2], 9, 8, 8, vis=True, feature_vec=True)
noncar_img = mpimg.imread(noncar_images[5])
_, noncar_dst = getHOG(noncar_img[:,:,2], 9, 8, 8, vis=True, feature_vec=True)
# Visualize
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(7,7))
f.subplots_adjust(hspace = .4, wspace=.2)
ax1.imshow(car_img)
ax1.set_title('Car Image', fontsize=16)
ax2.imshow(car_dst, cmap='gray')
ax2.set_title('Car HOG', fontsize=16)
ax3.imshow(noncar_img)
ax3.set_title('Non-Car Image', fontsize=16)
ax4.imshow(noncar_dst, cmap='gray')
ax4.set_title('Non-Car HOG', fontsize=16)
plt.show()
'''
return
def CombineWindowSearches(test_img):
#test_img = mpimg.imread('./test_images/test1.jpg')
rectangles = []
colorspace = 'YUV' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 11
pix_per_cell = 16
cell_per_block = 2
hog_channel = 'ALL' # Can be 0, 1, 2, or "ALL"
ystart = 400
ystop = 464
scale = 1.0
rectangles.append(SlidingWindow(test_img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
orient, pix_per_cell, cell_per_block, None, None))
ystart = 416
ystop = 480
scale = 1.0
rectangles.append(SlidingWindow(test_img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
orient, pix_per_cell, cell_per_block, None, None))
ystart = 400
ystop = 496
scale = 1.5
rectangles.append(SlidingWindow(test_img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
orient, pix_per_cell, cell_per_block, None, None))
ystart = 432
ystop = 528
scale = 1.5
rectangles.append(SlidingWindow(test_img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
orient, pix_per_cell, cell_per_block, None, None))
ystart = 400
ystop = 528
scale = 2.0
rectangles.append(SlidingWindow(test_img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
orient, pix_per_cell, cell_per_block, None, None))
ystart = 432
ystop = 560
scale = 2.0
rectangles.append(SlidingWindow(test_img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
orient, pix_per_cell, cell_per_block, None, None))
ystart = 400
ystop = 596
scale = 3.5
rectangles.append(SlidingWindow(test_img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
orient, pix_per_cell, cell_per_block, None, None))
ystart = 464
ystop = 660
scale = 3.5
rectangles.append(SlidingWindow(test_img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
orient, pix_per_cell, cell_per_block, None, None))
# apparently this is the best way to flatten a list of lists
rectangles = [item for sublist in rectangles for item in sublist]
'''
test_img_rects = drawOnImage(test_img, rectangles, color='random', thick=2)
plt.figure(figsize=(10,10))
plt.imshow(test_img_rects)
plt.show()
'''
return rectangles
def HeatMap(heatmap, bbox_list):
# Iterate through list of bboxes
for box in bbox_list:
# Add += 1 for all pixels inside each bbox
# Assuming each "box" takes the form ((x1, y1), (x2, y2))
heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
# Return updated heatmap
return heatmap
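# Hedged sketch (not from the original file): ThresholdImage's body and the
# Pipeline function referenced in Test() are cut off in this dump. The
# functions below only illustrate how HeatMap, thresholding and labeling are
# commonly combined; the "Sketch" names and the use of scipy.ndimage.label are
# assumptions, not the author's implementation.
from scipy.ndimage import label

def ThresholdImageSketch(heatmap, threshold):
    # Zero out pixels at or below the threshold to suppress sparse false positives
    heatmap[heatmap <= threshold] = 0
    return heatmap

def DrawLabeledBoxesSketch(img, labels):
    # labels is the (label_map, count) pair returned by scipy.ndimage.label
    for car_number in range(1, labels[1] + 1):
        nonzero = (labels[0] == car_number).nonzero()
        nonzeroy, nonzerox = np.array(nonzero[0]), np.array(nonzero[1])
        bbox = ((int(np.min(nonzerox)), int(np.min(nonzeroy))),
                (int(np.max(nonzerox)), int(np.max(nonzeroy))))
        cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)
    return img

def PipelineSketch(img):
    # Combine all sliding-window detections, accumulate them into a heatmap,
    # threshold out isolated hits, then draw one box per labeled region
    rects = CombineWindowSearches(img)
    heatmap = np.zeros_like(img[:, :, 0]).astype(np.float32)
    heatmap = HeatMap(heatmap, rects)
    heatmap = ThresholdImageSketch(heatmap, 1)
    return DrawLabeledBoxesSketch(np.copy(img), label(heatmap))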
def | ThresholdImage | identifier_name |
project.py | _per_block=2, hog_channel=0):
# Create a list to append feature vectors to
features = []
# Iterate through the list of images
for file in imgs:
# Read in each one by one
image = mpimg.imread(file)
# apply color conversion if other than 'RGB'
if cspace != 'RGB':
if cspace == 'HSV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
elif cspace == 'LUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
elif cspace == 'HLS':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
elif cspace == 'YUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
elif cspace == 'YCrCb':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
else: feature_image = np.copy(image)
# Call get_hog_features() with vis=False, feature_vec=True
if hog_channel == 'ALL':
hog_features = []
for channel in range(feature_image.shape[2]):
hog_features.append(getHOG(feature_image[:,:,channel],
orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True))
hog_features = np.ravel(hog_features)
else:
hog_features = getHOG(feature_image[:,:,hog_channel], orient,
pix_per_cell, cell_per_block, vis=False, feature_vec=True)
# Append the new feature vector to the features list
features.append(hog_features)
# Return list of feature vectors
return features
def PrepareData():
colorspace = 'YUV' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 11
pix_per_cell = 16
cell_per_block = 2
hog_channel = 'ALL' # Can be 0, 1, 2, or "ALL"
car_images, noncar_images = LoadTrainData(False)
car_features = getFeatures(car_images, cspace=colorspace, orient=orient,
pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
hog_channel=hog_channel)
notcar_features = getFeatures(noncar_images, cspace=colorspace, orient=orient,
pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
hog_channel=hog_channel)
X = np.vstack((car_features, notcar_features)).astype(np.float64)
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
rand_state = np.random.randint(0, 100)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=rand_state)
return X_train, y_train, X_test, y_test
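# Hedged sketch (assumption, not the original code): SlidingWindow accepts an
# X_scaler argument but this pipeline passes None, so HOG features are used
# unscaled. If scaling is wanted, a StandardScaler is normally fit on the
# training split only and reused at prediction time, e.g.:
def FitScalerSketch(X_train, X_test):
    from sklearn.preprocessing import StandardScaler
    X_scaler = StandardScaler().fit(X_train)  # fit on training data only to avoid leakage
    return X_scaler, X_scaler.transform(X_train), X_scaler.transform(X_test)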
def BuildAClassifier():
X_train, y_train, X_test, y_test = PrepareData()
clf = LinearSVC()
clf.fit(X_train, y_train)
return clf, X_train, y_train, X_test, y_test
def EvaluateClassifier():
clf, X_train, y_train, X_test, y_test = BuildAClassifier()
pred = clf.predict(X_test)
accuracy = np.mean(pred == y_test)
print('Test accuracy:', accuracy)
return accuracy
def SlidingWindow(img, ystart, ystop, scale, cspace, hog_channel, svc, X_scaler, orient,
pix_per_cell, cell_per_block, spatial_size, hist_bins, show_all_rectangles=False):
# array of rectangles where cars were detected
rectangles = []
img = img.astype(np.float32)/255
img_tosearch = img[ystart:ystop,:,:]
# apply color conversion if other than 'RGB'
if cspace != 'RGB':
if cspace == 'HSV':
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2HSV)
elif cspace == 'LUV':
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2LUV)
elif cspace == 'HLS':
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2HLS)
elif cspace == 'YUV':
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2YUV)
elif cspace == 'YCrCb':
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2YCrCb)
else: ctrans_tosearch = np.copy(img)
# rescale image if other than 1.0 scale
if scale != 1:
imshape = ctrans_tosearch.shape
ctrans_tosearch = cv2.resize(ctrans_tosearch, (int(imshape[1]/scale), int(imshape[0]/scale)))
# select colorspace channel for HOG
if hog_channel == 'ALL':
ch1 = ctrans_tosearch[:,:,0]
ch2 = ctrans_tosearch[:,:,1]
ch3 = ctrans_tosearch[:,:,2]
else:
ch1 = ctrans_tosearch[:,:,hog_channel]
# Define blocks and steps as above
nxblocks = (ch1.shape[1] // pix_per_cell)+1 #-1
nyblocks = (ch1.shape[0] // pix_per_cell)+1 #-1
nfeat_per_block = orient*cell_per_block**2
# 64 was the original sampling rate, with 8 cells and 8 pix per cell
window = 64
nblocks_per_window = (window // pix_per_cell)-1
cells_per_step = 2 # Instead of overlap, define how many cells to step
nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
nysteps = (nyblocks - nblocks_per_window) // cells_per_step
# Compute individual channel HOG features for the entire image
hog1 = getHOG(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
if hog_channel == 'ALL':
hog2 = getHOG(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog3 = getHOG(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
for xb in range(nxsteps):
for yb in range(nysteps):
ypos = yb*cells_per_step
xpos = xb*cells_per_step
# Extract HOG for this patch
hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
if hog_channel == 'ALL':
hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
else:
hog_features = hog_feat1
xleft = xpos*pix_per_cell
ytop = ypos*pix_per_cell
test_prediction = svc.predict(hog_features.reshape(1, -1))
if test_prediction == 1 or show_all_rectangles:
xbox_left = int(xleft*scale)
ytop_draw = int(ytop*scale)
win_draw = int(window*scale)
rectangles.append(((xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart)))
return rectangles
def drawOnImage(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
random_color = False
# Iterate through the bounding boxes
for bbox in bboxes:
if color == 'random' or random_color:
color = (np.random.randint(0,255), np.random.randint(0,255), np.random.randint(0,255))
random_color = True
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
def Test():
test_images = glob.glob('./test_images/test*.jpg')
fig, axs = plt.subplots(3, 2, figsize=(16,14))
fig.subplots_adjust(hspace = .004, wspace=.002)
axs = axs.ravel()
for i, im in enumerate(test_images):
axs[i].imshow(Pipeline(mpimg.imread(im)))
axs[i].axis('off')
plt.show()
'''
car_images, noncar_images = LoadTrainData(False)
car_img = mpimg.imread(car_images[5])
_, car_dst = getHOG(car_img[:,:,2], 9, 8, 8, vis=True, feature_vec=True)
noncar_img = mpimg.imread | features = []
# Iterate through the list of images
for file in imgs:
# Read in each one by one
image = mpimg.imread(file) | random_line_split |
project.py | _per_block=2, hog_channel=0):
# Create a list to append feature vectors to
features = []
# Iterate through the list of images
for file in imgs:
# Read in each one by one
image = mpimg.imread(file)
# apply color conversion if other than 'RGB'
if cspace != 'RGB':
if cspace == 'HSV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
elif cspace == 'LUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
elif cspace == 'HLS':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
elif cspace == 'YUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
elif cspace == 'YCrCb':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
else: feature_image = np.copy(image)
# Call get_hog_features() with vis=False, feature_vec=True
if hog_channel == 'ALL':
hog_features = []
for channel in range(feature_image.shape[2]):
hog_features.append(getHOG(feature_image[:,:,channel],
orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True))
hog_features = np.ravel(hog_features)
else:
hog_features = getHOG(feature_image[:,:,hog_channel], orient,
pix_per_cell, cell_per_block, vis=False, feature_vec=True)
# Append the new feature vector to the features list
features.append(hog_features)
# Return list of feature vectors
return features
def PrepareData():
colorspace = 'YUV' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 11
pix_per_cell = 16
cell_per_block = 2
hog_channel = 'ALL' # Can be 0, 1, 2, or "ALL"
car_images, noncar_images = LoadTrainData(False)
car_features = getFeatures(car_images, cspace=colorspace, orient=orient,
pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
hog_channel=hog_channel)
notcar_features = getFeatures(noncar_images, cspace=colorspace, orient=orient,
pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
hog_channel=hog_channel)
X = np.vstack((car_features, notcar_features)).astype(np.float64)
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
rand_state = np.random.randint(0, 100)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=rand_state)
return X_train, y_train, X_test, y_test
def BuildAClassifier():
X_train, y_train, X_test, y_test = PrepareData()
clf = LinearSVC()
clf.fit(X_train, y_train)
return clf, X_train, y_train, X_test, y_test
def EvaluateClassifier():
clf, X_train, y_train, X_test, y_test = BuildAClassifier()
pred = clf.predict(X_test)
accuracy = np.mean(pred == y_test)
print('Test accuracy:', accuracy)
return accuracy
def SlidingWindow(img, ystart, ystop, scale, cspace, hog_channel, svc, X_scaler, orient,
pix_per_cell, cell_per_block, spatial_size, hist_bins, show_all_rectangles=False):
# array of rectangles where cars were detected
rectangles = []
img = img.astype(np.float32)/255
img_tosearch = img[ystart:ystop,:,:]
# apply color conversion if other than 'RGB'
if cspace != 'RGB':
if cspace == 'HSV':
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2HSV)
elif cspace == 'LUV':
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2LUV)
elif cspace == 'HLS':
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2HLS)
elif cspace == 'YUV':
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2YUV)
elif cspace == 'YCrCb':
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2YCrCb)
else: ctrans_tosearch = np.copy(img)
# rescale image if other than 1.0 scale
if scale != 1:
imshape = ctrans_tosearch.shape
ctrans_tosearch = cv2.resize(ctrans_tosearch, (int(imshape[1]/scale), int(imshape[0]/scale)))
# select colorspace channel for HOG
if hog_channel == 'ALL':
ch1 = ctrans_tosearch[:,:,0]
ch2 = ctrans_tosearch[:,:,1]
ch3 = ctrans_tosearch[:,:,2]
else:
ch1 = ctrans_tosearch[:,:,hog_channel]
# Define blocks and steps as above
nxblocks = (ch1.shape[1] // pix_per_cell)+1 #-1
nyblocks = (ch1.shape[0] // pix_per_cell)+1 #-1
nfeat_per_block = orient*cell_per_block**2
# 64 was the original sampling rate, with 8 cells and 8 pix per cell
window = 64
nblocks_per_window = (window // pix_per_cell)-1
cells_per_step = 2 # Instead of overlap, define how many cells to step
nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
nysteps = (nyblocks - nblocks_per_window) // cells_per_step
# Compute individual channel HOG features for the entire image
hog1 = getHOG(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
if hog_channel == 'ALL':
hog2 = getHOG(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog3 = getHOG(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
for xb in range(nxsteps):
for yb in range(nysteps):
ypos = yb*cells_per_step
xpos = xb*cells_per_step
# Extract HOG for this patch
hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
if hog_channel == 'ALL':
hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
else:
hog_features = hog_feat1
xleft = xpos*pix_per_cell
ytop = ypos*pix_per_cell
test_prediction = svc.predict(hog_features.reshape(1, -1))
if test_prediction == 1 or show_all_rectangles:
xbox_left = int(xleft*scale)
ytop_draw = int(ytop*scale)
win_draw = int(window*scale)
rectangles.append(((xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart)))
return rectangles
def drawOnImage(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
random_color = False
# Iterate through the bounding boxes
for bbox in bboxes:
if color == 'random' or random_color:
color = (np.random.randint(0,255), np.random.randint(0,255), np.random.randint(0,255))
random_color = True
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
def Test():
test_images = glob.glob('./test_images/test*.jpg')
fig, axs = plt.subplots(3, 2, figsize=(16,14))
fig.subplots_adjust(hspace = .004, wspace=.002)
axs = axs.ravel()
for i, im in enumerate(test_images):
axs[i].imshow(Pipeline(mpimg.imread(im)))
axs[i].axis('off')
plt.show()
'''
car_images, noncar_images = LoadTrainData(False)
car_img = mpimg.imread(car_images[5])
_, car_dst = getHOG(car_img[:,:,2], 9, 8, 8, vis=True, feature_vec=True)
noncar_img = mpimg | hog2 = getHOG(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog3 = getHOG(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False) | conditional_block |
storage_service.go | , ",")
if sc.ScType == models.ScTypeUnique {
// Handle dedicated (exclusive) storage
var clusterCount int64
clusterCount, err = ss.Engine.Where("sc_name = ?", sc.Name).Count(new(models.ClusterInstance))
if err != nil {
return err
}
if clusterCount > 0 {
return response.NewMsg("This storage is already in use and cannot be modified", "此存储已被使用,无法修改")
}
if len(userStrList) <= 0 {
_, err = ss.Engine.Where("sc_id = ?", id).Delete(new(models.ScUser))
return err
}
if len(userStrList) != 1 {
return errors.New("unique-storage only assign one user")
}
var userId int
userId, err = strconv.Atoi(userStrList[0])
if err != nil {
return err
}
scUser := models.ScUser{UserId: userId, ScId: id}
if len(dbUsers) == 0 {
_, err = ss.Engine.Insert(&scUser)
} else {
_, err = ss.Engine.Where("sc_id = ?", id).Update(&scUser)
}
} else {
// <userId,index>
dbUsersM := map[int]int{}
for i := range dbUsers {
dbUsersM[dbUsers[i].UserId] = i
}
insertList := make([]models.ScUser, 0)
for i := range userStrList {
if len(userStrList[i]) == 0 {
continue
}
var userId, err = strconv.Atoi(userStrList[i])
if err != nil {
utils.LoggerError(err)
continue
}
if _, ok := dbUsersM[userId]; ok {
// Already present in the database: mark it as -1 and skip
dbUsersM[userId] = -1
continue
}
insertList = append(insertList, models.ScUser{ScId: id, UserId: userId})
}
for _, v := range dbUsersM {
if v != -1 {
clusterCount, err := ss.Engine.Where("sc_name = ?", sc.Name).And("user_id = ?", dbUsers[v].UserId).Count(new(models.ClusterInstance))
if err != nil {
return err
}
if clusterCount > 0 {
u := models.User{}
_, _ = ss.Engine.ID(dbUsers[v].UserId).Cols("user_name").Get(&u)
return response.NewMsg(fmt.Sprintf("%v is already using this storage and cannot be removed", u.UserName), fmt.Sprintf("%v已使用此存储,不能删除此用户", u.UserName))
}
}
}
if len(insertList) > 0 {
_, err := ss.Engine.Insert(&insertList)
if err != nil {
return err
}
}
for _, v := range dbUsersM {
if v != -1 {
_, _ = ss.Engine.ID(dbUsers[v].Id).Delete(new(models.ScUser))
}
}
}
}
_, err = ss.Engine.ID(id).Cols("assign_all").Update(&models.Sc{AssignAll: assignAll})
return err
}
func (ss *storageService) List(page int, pageSize int, key string, userId int, userTag string, isFilter bool) ([]models.ReturnSc, int64) {
scPv := make([]models.Sc, 0)
var count int64
if userId <= 0 && len(userTag) != 0 {
u := models.User{}
_, _ = ss.Engine.Where("user_tag = ?", userTag).Cols("id").Get(&u)
userId = u.Id
}
if userId > 0 {
scList := make([]models.ScUser, 0)
err := ss.Engine.Where("user_id = ?", userId).Find(&scList)
utils.LoggerError(err)
for _, v := range scList {
sc := models.Sc{Id: v.ScId}
_, err = ss.Engine.Where("name like ? or describe like ? ", "%"+key+"%", "%"+key+"%").Get(&sc)
utils.LoggerError(err)
if len(sc.Name) > 0 {
scPv = append(scPv, sc)
}
}
assignAllSc := make([]models.Sc, 0)
err = ss.Engine.Where("assign_all = true").Find(&assignAllSc)
utils.LoggerError(err)
if len(assignAllSc) > 0 {
scPv = append(scPv, assignAllSc...)
}
count = int64(len(scPv))
if utils.MustInt(page, pageSize) {
min := pageSize * (page - 1)
max := min + pageSize
scPv = scPv[min:utils.Min(max, len(scPv))]
}
} else {
err := ss.Engine.Where("name like ? or describe like ? ", "%"+key+"%", "%"+key+"%").Limit(pageSize, pageSize*(page-1)).Desc("id").Find(&scPv)
utils.LoggerError(err)
count, _ = ss.Engine.Where("name like ? or describe like ? ", "%"+key+"%", "%"+key+"%").Count(&models.Sc{})
}
scReturn := make([]models.ReturnSc, len(scPv))
for i, sc := range scPv {
pv := make([]models.PersistentVolume, 0)
if sc.ScType == models.ScTypeUnique {
err := ss.Engine.Where(" sc_id = ?", sc.Id).OrderBy("id").Find(&pv)
utils.LoggerError(err)
sc.NodeNum = len(pv)
}
cluster := make([]models.ClusterInstance, 0)
err := ss.Engine.Where("sc_name = ?", sc.Name).Omit("yaml_text").Find(&cluster)
utils.LoggerError(err)
var scUserRaw json.RawMessage
if sc.AssignAll {
scUserRaw = []byte("-1")
} else {
scUser := make([]models.ScUser, 0)
err = ss.Engine.Where("sc_id = ?", sc.Id).Find(&scUser)
utils.LoggerError(err)
for i, user := range scUser {
u := models.User{Id: user.UserId}
_, err = ss.Engine.Get(&u)
utils.LoggerError(err)
scUser[i].UserName = u.UserName
}
scUserRaw, _ = json.Marshal(scUser)
}
scReturn[i] = models.ReturnSc{Sc: sc, Children: pv, Cluster: cluster, ScUser: scUserRaw}
}
if isFilter {
// drop dedicated storages that already host a cluster; build a new slice
// rather than deleting from scReturn while ranging over it
filtered := make([]models.ReturnSc, 0, len(scReturn))
for _, sc := range scReturn {
if !(sc.ScType == models.ScTypeUnique && len(sc.Cluster) > 0) {
filtered = append(filtered, sc)
}
}
scReturn = filtered
}
return scReturn, count
}
func (ss *storageService) Add(scName string, reclaimPolicy string, remark string, orgTag string, userTag string, userId int, scType string, nodeNum int, userIdStr string) (models.Sc, error) {
sc := models.Sc{
Name: scName,
ScType: scType,
NodeNum: nodeNum,
ReclaimPolicy: reclaimPolicy,
Describe: remark,
OrgTag: orgTag,
UserTag: userTag,
AssignAll: userIdStr == "-1",
}
namespace := ss.cs.GetNameSpace()
// Dedicated storage: create the StorageClass in Kubernetes
if scType == "unique-storage" {
reclaimPolicyCore := core1.PersistentVolumeReclaimPolicy(reclaimPolicy)
scConfig := storage1.StorageClass{
TypeMeta: meta1.TypeMeta{
Kind: "StorageClass",
APIVersion: "storage.k8s.io/v1",
},
ObjectMeta: meta1.ObjectMeta{
Name: scName,
},
Provisioner: "kubernetes.io/no-provisioner",
ReclaimPolicy: &reclaimPolicyCore,
}
err := ss.cs.CreateOption("sc", namespace, &scConfig, meta1.CreateOptions{})
if err != nil {
return sc, err
}
} else {
err, scAddr := ss.cs.GetResources("sc", scName, namespace, meta1.GetOptions{})
if err != nil {
return sc, err
}
if value, ok := (*scAddr).(*storage1.StorageClass); ok {
sc.ReclaimPolicy = string(*value.ReclaimPolicy)
}
}
_, err := ss.Engine.Insert(&sc)
if err != nil || sc.AssignAll {
return sc, err
}
userIds := strings.Split(userIdStr, ",")
su := make([]models.ScUser, 0)
for i := range userIds {
if len(userIds[i]) == 0 {
continue
}
id, err := strconv.Atoi(userIds[i])
if err != nil {
utils.LoggerError(err)
continue
}
su = append(su, models.ScUser{UserId: id, ScId: sc.Id})
} | random_line_split |
storage_service.go | _, err = ss.Engine.Get(&u)
utils.LoggerError(err)
scUser[i].UserName = u.UserName
}
scUserRaw, _ = json.Marshal(scUser)
}
scReturn[i] = models.ReturnSc{Sc: sc, Children: pv, Cluster: cluster, ScUser: scUserRaw}
}
if isFilter {
// drop dedicated storages that already host a cluster; build a new slice
// rather than deleting from scReturn while ranging over it
filtered := make([]models.ReturnSc, 0, len(scReturn))
for _, sc := range scReturn {
if !(sc.ScType == models.ScTypeUnique && len(sc.Cluster) > 0) {
filtered = append(filtered, sc)
}
}
scReturn = filtered
}
return scReturn, count
}
func (ss *storageService) Add(scName string, reclaimPolicy string, remark string, orgTag string, userTag string, userId int, scType string, nodeNum int, userIdStr string) (models.Sc, error) {
sc := models.Sc{
Name: scName,
ScType: scType,
NodeNum: nodeNum,
ReclaimPolicy: reclaimPolicy,
Describe: remark,
OrgTag: orgTag,
UserTag: userTag,
AssignAll: userIdStr == "-1",
}
namespace := ss.cs.GetNameSpace()
// Dedicated storage: create the StorageClass in Kubernetes
if scType == "unique-storage" {
reclaimPolicyCore := core1.PersistentVolumeReclaimPolicy(reclaimPolicy)
scConfig := storage1.StorageClass{
TypeMeta: meta1.TypeMeta{
Kind: "StorageClass",
APIVersion: "storage.k8s.io/v1",
},
ObjectMeta: meta1.ObjectMeta{
Name: scName,
},
Provisioner: "kubernetes.io/no-provisioner",
ReclaimPolicy: &reclaimPolicyCore,
}
err := ss.cs.CreateOption("sc", namespace, &scConfig, meta1.CreateOptions{})
if err != nil {
return sc, err
}
} else {
err, scAddr := ss.cs.GetResources("sc", scName, namespace, meta1.GetOptions{})
if err != nil {
return sc, err
}
if value, ok := (*scAddr).(*storage1.StorageClass); ok {
sc.ReclaimPolicy = string(*value.ReclaimPolicy)
}
}
_, err := ss.Engine.Insert(&sc)
if err != nil || sc.AssignAll {
return sc, err
}
userIds := strings.Split(userIdStr, ",")
su := make([]models.ScUser, 0)
for i := range userIds {
if len(userIds[i]) == 0 {
continue
}
id, err := strconv.Atoi(userIds[i])
if err != nil {
utils.LoggerError(err)
continue
}
su = append(su, models.ScUser{UserId: id, ScId: sc.Id})
}
if userId > 0 {
su = append(su, models.ScUser{UserId: userId, ScId: sc.Id})
}
if len(su) > 0 {
_, _ = ss.Engine.Insert(&su)
}
return sc, nil
}
func (ss *storageService) Update(id int, remark string, nodeNum int) error {
sc := models.Sc{
Id: id,
Describe: remark,
NodeNum: nodeNum,
}
_, err := ss.Engine.ID(sc.Id).Update(&sc)
return err
}
func (ss *storageService) Delete(id int) error {
if id <= 0 {
return errors.New("storage id must > 0")
}
sc := models.Sc{
Id: id,
}
exist, err := ss.Engine.Get(&sc)
if err != nil {
return err
}
if !exist {
return nil
}
clusterCount, err := ss.Engine.Where("sc_name = ?", sc.Name).Count(new(models.ClusterInstance))
if err != nil {
return err
}
if clusterCount > 0 {
return response.NewMsg("This storage is occupied by the cluster and cannot be deleted", "此存储被集群占用,无法删除!")
}
if sc.ScType == models.ScTypeUnique {
pvCount, err := ss.Engine.Where("sc_id = ?", id).Count(new(models.PersistentVolume))
if err != nil {
return err
}
if pvCount > 0 {
return response.NewMsg("This storage still has PVs and cannot be deleted", "请先删除此存储下的pv!")
}
err = ss.cs.DeleteOption("sc", sc.Name, ss.cs.GetNameSpace(), meta1.DeleteOptions{})
if err != nil {
return err
}
}
_, err = ss.Engine.ID(sc.Id).Delete(&sc)
_, _ = ss.Engine.Where("sc_id = ?", sc.Id).Delete(new(models.ScUser))
return err
}
func (ss *storageService) PvAdd(storageId int, pvName string, mountPoint string, iqn string, lun int, size string, userTag string, orgTag string, namespace string) (bool, models.PersistentVolume, string) {
ipAddr := ""
port := ""
if strings.Contains(mountPoint, ":") {
ipAddr = strings.Split(mountPoint, ":")[0]
port = strings.Split(mountPoint, ":")[1]
} else {
return false, models.PersistentVolume{}, "mountPoint format error"
}
pv := models.PersistentVolume{
Name: pvName,
ScId: storageId,
Lun: lun,
Capacity: size,
Iqn: iqn,
IpAddr: ipAddr,
Port: port,
UserTag: userTag,
OrgTag: orgTag,
}
sc := models.Sc{Id: storageId}
success, err := ss.Engine.Get(&sc)
utils.LoggerError(err)
if !success {
if err != nil {
return success, pv, err.Error()
} else {
return success, pv, ""
}
}
scConfig := core1.PersistentVolume{
TypeMeta: meta1.TypeMeta{
Kind: "PersistentVolume",
APIVersion: "v1",
},
ObjectMeta: meta1.ObjectMeta{
Name: pvName,
},
Spec: core1.PersistentVolumeSpec{
Capacity: core1.ResourceList{
core1.ResourceName("storage"): resource.MustParse(fmt.Sprintf("%sGi", size)),
},
AccessModes: []core1.PersistentVolumeAccessMode{core1.PersistentVolumeAccessMode("ReadWriteOnce")},
StorageClassName: sc.Name,
PersistentVolumeSource: core1.PersistentVolumeSource{
ISCSI: &core1.ISCSIPersistentVolumeSource{
TargetPortal: mountPoint,
IQN: iqn,
ISCSIInterface: "iser",
Lun: int32(lun),
FSType: "xfs",
ReadOnly: false,
},
},
},
}
err = ss.cs.CreateOption("pv", namespace, &scConfig, meta1.CreateOptions{})
if err != nil {
return false, pv, err.Error()
}
_, err = ss.Engine.Insert(&pv)
if err != nil {
return false, pv, err.Error()
}
return true, pv, ""
}
func (ss *storageService) PvDelete(id int) (err error) {
pv := models.PersistentVolume{Id: id}
hasPV, err := ss.Engine.Get(&pv)
if !hasPV {
return fmt.Errorf("pv %v not found: %v", id, err)
}
if pv.Status == string(core1.VolumeBound) {
return errors.New("a PV in Bound status cannot be deleted")
}
sc := models.Sc{Id: pv.ScId}
hasSc, err := ss.Engine.Get(&sc)
if !hasSc {
return fmt.Errorf("sc %v not found: %v", pv.ScId, err)
}
if sc.ScType != "shared-storage" {
// Check whether any cluster is still using this storage
existCluster, err := ss.Engine.Exist(&models.ClusterInstance{ScName: sc.Name})
if err != nil {
return err
}
if existCluster {
return errors.New("this PV is still used by a cluster")
}
}
err = ss.cs.DeleteOption("pv", pv.Name, "", meta1.DeleteOptions{})
if err != nil && !utils.ErrorContains(err, "not found") {
return
}
_, err = ss.Engine.ID(pv.Id).Delete(&pv)
if err != nil {
return
}
return nil
}
func (ss *storageService) SelectOneScByName(name string) (models.Sc, bool) {
var sc models.Sc
_, err := ss.Engine.Where(" name = ? ", name).Get(&sc)
utils.LoggerError(err)
return sc, err == nil
}
func (ss *storageService) SelectOnePvByName(name string) (models.PersistentVolume, bool) {
var pv models.PersistentVolume
_, err := ss.Engine.Where(" name = ? ", name).Get(&pv)
utils.LoggerError(err)
return pv, err == nil
}
func (ss *storageService) UserRegister(userId int, scList []map[string]interface{}) e | rror {
session : | identifier_name |